Panchovix committed
Commit d2c36d6 · verified · 1 Parent(s): 794dc00

Upload 44 files

Files changed (45)
  1. .gitattributes +3 -0
  2. BIAS.md +6 -0
  3. EXPLAINABILITY.md +14 -0
  4. PRIVACY.md +11 -0
  5. README.md +355 -5
  6. SAFETY_and_SECURITY.md +8 -0
  7. __init__.py +0 -0
  8. accuracy_plot.png +3 -0
  9. block_config.py +118 -0
  10. config.json +2972 -0
  11. configuration_decilm.py +65 -0
  12. model-00001-of-00015.safetensors +3 -0
  13. model-00002-of-00015.safetensors +3 -0
  14. model-00003-of-00015.safetensors +3 -0
  15. model-00004-of-00015.safetensors +3 -0
  16. model-00005-of-00015.safetensors +3 -0
  17. model-00006-of-00015.safetensors +3 -0
  18. model-00007-of-00015.safetensors +3 -0
  19. model-00008-of-00015.safetensors +3 -0
  20. model-00009-of-00015.safetensors +3 -0
  21. model-00010-of-00015.safetensors +3 -0
  22. model-00011-of-00015.safetensors +3 -0
  23. model-00012-of-00015.safetensors +3 -0
  24. model-00013-of-00015.safetensors +3 -0
  25. model-00014-of-00015.safetensors +3 -0
  26. model-00015-of-00015.safetensors +3 -0
  27. model.safetensors.index.json +0 -0
  28. modeling_decilm.py +1681 -0
  29. nemo_common.json +1 -0
  30. nemo_model_config.yaml +211 -0
  31. quantization_config.json +0 -0
  32. special_tokens_map.json +16 -0
  33. tokenizer.json +3 -0
  34. tokenizer_config.json +2063 -0
  35. tokenizer_name.txt +1 -0
  36. training_flowchart.png +3 -0
  37. transformers_4_44_2__activations.py +239 -0
  38. transformers_4_44_2__cache_utils.py +1347 -0
  39. transformers_4_44_2__configuration_llama.py +203 -0
  40. transformers_4_44_2__modeling_attn_mask_utils.py +482 -0
  41. transformers_4_44_2__modeling_flash_attention_utils_backward_compat.py +348 -0
  42. transformers_4_44_2__modeling_outputs.py +0 -0
  43. transformers_4_44_2__modeling_rope_utils.py +559 -0
  44. transformers_4_44_2__pytorch_utils.py +17 -0
  45. variable_cache.py +139 -0
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ accuracy_plot.png filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ training_flowchart.png filter=lfs diff=lfs merge=lfs -text
BIAS.md ADDED
@@ -0,0 +1,6 @@
+ # **Bias**
+
+ |Field:|Response:|
+ |:---:|:---:|
+ |Participation considerations from adversely impacted groups (protected classes) in model design and testing:|None|
+ |Measures taken to mitigate against unwanted bias:|None|
EXPLAINABILITY.md ADDED
@@ -0,0 +1,14 @@
+ # **Explainability**
+
+ |Field:|Response:|
+ |:---:|:---:|
+ |Intended Application(s) & Domain(s):|Text generation, reasoning, summarization, and question answering.|
+ |Model Type:|Text-to-text transformer|
+ |Intended Users:|This model is intended for developers, researchers, and customers building/utilizing LLMs, while balancing accuracy and efficiency.|
+ |Output:|Text String(s)|
+ |Describe how the model works:|Generates text by predicting the next word or token based on the context provided in the input sequence using multiple self-attention layers|
+ |Technical Limitations:|The model was trained on data that contains toxic language, unsafe content, and societal biases originally crawled from the internet. Therefore, the model may amplify those biases and return toxic responses, especially when prompted with toxic prompts. The model may generate answers that are inaccurate, omit key information, or include irrelevant or redundant text, producing socially unacceptable or undesirable text even if the prompt itself does not include anything explicitly offensive.<br/>The model demonstrates weakness to alignment-breaking attacks. Users are advised to deploy language model guardrails alongside this model to prevent potentially harmful outputs.|
+ |Verified to have met prescribed quality standards?|Yes|
+ |Performance Metrics:|Accuracy, Throughput, and user-side throughput|
+ |Potential Known Risks:|The model was optimized explicitly for instruction following and, as such, is more susceptible to prompt injection and jailbreaking in various forms as a result of its instruction tuning. This means that the model should be paired with additional rails or system filtering to limit exposure to instructions from malicious sources -- either directly or indirectly by retrieval (e.g. via visiting a website) -- as they may yield outputs that can lead to harmful, system-level outcomes up to and including remote code execution in agentic systems when effective security controls, including guardrails, are not in place.<br/>The model was trained on data that contains toxic language and societal biases originally crawled from the internet. Therefore, the model may amplify those biases and return toxic responses, especially when prompted with toxic prompts. The model may generate answers that are inaccurate, omit key information, or include irrelevant or redundant text, producing socially unacceptable or undesirable text even if the prompt itself does not include anything explicitly offensive.|
+ |End User License Agreement:|Your use of this model is governed by the [NVIDIA Open Model License](https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/). Additional Information: [Llama 3.1 Community License Agreement](https://www.llama.com/llama3_1/license/). Built with Llama.|
PRIVACY.md ADDED
@@ -0,0 +1,11 @@
+ # **Privacy**
+
+ |Field:|Response:|
+ |:---:|:---:|
+ |Generatable or reverse engineerable personally-identifiable information?|None|
+ |Was consent obtained for any personal data used?|None Known|
+ |Personal data used to create this model?|None Known|
+ |How often is dataset reviewed?|Before Release|
+ |Is there provenance for all datasets used in training?|Yes|
+ |Does data labeling (annotation, metadata) comply with privacy laws?|Yes|
+ |Applicable NVIDIA Privacy Policy|https://www.nvidia.com/en-us/about-nvidia/privacy-policy/|
README.md CHANGED
@@ -1,5 +1,355 @@
- ---
- license: other
- license_name: other
- license_link: LICENSE
- ---
+ ---
+ library_name: transformers
+ license: other
+ license_name: nvidia-open-model-license
+ license_link: >-
+   https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/
+
+ pipeline_tag: text-generation
+ language:
+ - en
+ tags:
+ - nvidia
+ - llama-3
+ - pytorch
+ ---
+
+ # Llama-3.1-Nemotron-Ultra-253B-v1
+
+ ## Model Overview
+
+ ![Accuracy Plot](./accuracy_plot.png)
+
+ Llama-3.1-Nemotron-Ultra-253B-v1 is a large language model (LLM) which is a derivative of [Meta Llama-3.1-405B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct) (AKA the *reference model*). It is a reasoning model that is post-trained for reasoning, human chat preferences, and tasks such as RAG and tool calling. The model supports a context length of 128K tokens. This model fits on a single 8xH100 node for inference.
+
+ Llama-3.1-Nemotron-Ultra-253B-v1 is a model which offers a great tradeoff between model accuracy and efficiency. Efficiency (throughput) directly translates to savings. Using a novel Neural Architecture Search (NAS) approach, we greatly reduce the model’s memory footprint, enabling larger workloads, as well as reducing the number of GPUs required to run the model in a data center environment. This NAS approach enables the selection of a desired point in the accuracy-efficiency tradeoff. Furthermore, by using a novel method to vertically compress the model (see details [here](https://arxiv.org/abs/2503.18908)), it also offers a significant improvement in latency.
+
+ The model underwent a multi-phase post-training process to enhance both its reasoning and non-reasoning capabilities. This includes a supervised fine-tuning stage for Math, Code, Reasoning, Chat, and Tool Calling as well as multiple reinforcement learning (RL) stages using Group Relative Policy Optimization (GRPO) algorithms for reasoning, chat, and instruction-following.
+
+ This model is ready for commercial use.
+
+ For more details on how the model was trained, please see [this blog](https://developer.nvidia.com/blog/build-enterprise-ai-agents-with-advanced-open-nvidia-llama-nemotron-reasoning-models/).
+
+ ![Training Flow](./training_flowchart.png)
+
+ This model is part of the Llama Nemotron Collection. You can find the other model(s) in this family here:
+
+ - [Llama-3.1-Nemotron-Nano-8B-v1](https://huggingface.co/nvidia/Llama-3.1-Nemotron-Nano-8B-v1)
+ - [Llama-3.3-Nemotron-Super-49B-v1](https://huggingface.co/nvidia/Llama-3_3-Nemotron-Super-49B-v1)
+
+ ## License/Terms of Use
+
+ GOVERNING TERMS: Your use of this model is governed by the [NVIDIA Open Model License](https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/). Additional Information: [Llama 3.1 Community License Agreement](https://www.llama.com/llama3_1/license/). Built with Llama.
+
+ **Model Developer:** NVIDIA
+
+ **Model Dates:** Trained between November 2024 and April 2025
+
+ **Data Freshness:** The pretraining data has a cutoff of 2023 per Llama-3.1-405B-Instruct
+
+ ### Use Case:
+ Developers designing AI Agent systems, chatbots, RAG systems, and other AI-powered applications. Also suitable for typical instruction-following tasks.
+
+ ### Release Date:
+ 2025-04-07
+
+ ## References
+
+ * [\[2502.00203\] Reward-aware Preference Optimization: A Unified Mathematical Framework for Model Alignment](https://arxiv.org/abs/2502.00203)
+ * [\[2411.19146\] Puzzle: Distillation-Based NAS for Inference-Optimized LLMs](https://arxiv.org/abs/2411.19146)
+ * [\[2503.18908\] FFN Fusion: Rethinking Sequential Computation in Large Language Models](https://arxiv.org/abs/2503.18908)
+
+ ## Model Architecture
+ **Architecture Type:** Dense decoder-only Transformer model
+ **Network Architecture:** Llama-3.1-405B-Instruct, customized through Neural Architecture Search (NAS)
+
+ **This model was developed based on Llama-3.1-405B-Instruct.** <br>
+ **This model has 253B model parameters.** <br>
+
+ The model is a derivative of Llama 3.1-405B-Instruct, using Neural Architecture Search (NAS). The NAS algorithm results in non-standard and non-repetitive blocks. This includes the following:
+
+ * Skip attention: In some blocks, the attention is skipped entirely, or replaced with a single linear layer.
+ * Variable FFN: The expansion/compression ratio in the FFN layer is different between blocks.
+ * FFN Fusion: When several consecutive attention layers are skipped, which can result in a sequence of multiple FFNs, that sequence of FFNs is fused into a smaller number of wider FFN layers.
+
+ For each block of the reference model, we create multiple variants providing different tradeoffs of quality vs. computational complexity, discussed in more depth below. We then search over the blocks to create a model which meets the required throughput and memory while minimizing the quality degradation. To recover performance, the model initially undergoes knowledge distillation (KD) for 65 billion tokens. This is followed by a continual pretraining (CPT) phase for 88 billion tokens.
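The per-block structure selected by NAS is recorded in this repository's `config.json` under `block_configs`. As a quick way to see the result, here is a minimal sketch (an editorial example, not part of the model code) that counts how many blocks skip attention entirely and which FFN multipliers appear; it assumes `config.json` from this repository is in the working directory and uses only the standard library.

```py
import json
from collections import Counter

# Assumes config.json from this repository is in the working directory.
with open("config.json") as f:
    cfg = json.load(f)

blocks = cfg["block_configs"]
attn_noop = sum(1 for b in blocks if b["attention"]["no_op"])
attn_linear = sum(1 for b in blocks if b["attention"]["replace_with_linear"])
ffn_mults = Counter(b["ffn"]["ffn_mult"] for b in blocks if not b["ffn"]["no_op"])

print(f"{len(blocks)} blocks total")
print(f"{attn_noop} blocks skip attention, {attn_linear} replace it with a linear layer")
print("FFN expansion multipliers in use:", dict(ffn_mults))
```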
+
+ ## Intended use
+
+ Llama-3.1-Nemotron-Ultra-253B-v1 is a general purpose reasoning and chat model intended to be used in English and coding languages. Other non-English languages (German, French, Italian, Portuguese, Hindi, Spanish, and Thai) are also supported.
+
+ ## Input
+ - **Input Type:** Text
+ - **Input Format:** String
+ - **Input Parameters:** One-Dimensional (1D)
+ - **Other Properties Related to Input:** Context length up to 131,072 tokens
+
+ ## Output
+ - **Output Type:** Text
+ - **Output Format:** String
+ - **Output Parameters:** One-Dimensional (1D)
+ - **Other Properties Related to Output:** Context length up to 131,072 tokens
+
+ ## Software Integration
+ - **Runtime Engine:** Transformers
+ - **Recommended Hardware Microarchitecture Compatibility:**
+   - NVIDIA Hopper
+   - NVIDIA Ampere
+ - **Preferred Operating System(s):** Linux
+
+ ## Model Version
+ 1.0 (4/7/2025)
+
+ ## Quick Start and Usage Recommendations:
+
+ 1. Reasoning mode (ON/OFF) is controlled via the system prompt, which must be set as shown in the example below. All instructions should be contained within the user prompt.
+ 2. We recommend setting temperature to `0.6`, and Top P to `0.95` for Reasoning ON mode.
+ 3. We recommend using greedy decoding (temperature 0) for Reasoning OFF mode.
+ 4. We do not recommend adding additional system prompts besides the control prompt; all instructions should be put into the user query.
+ 5. We have provided a list of prompts to use for evaluation for each benchmark where a specific template is required.
+
+ You can try this model out through the preview API, using this link: [Llama-3_1-Nemotron-Ultra-253B-v1](https://build.nvidia.com/nvidia/llama-3_1-nemotron-ultra-253b-v1).
+
+ ### Use It with Transformers
+ See the snippet below for usage with the [Hugging Face Transformers](https://huggingface.co/docs/transformers/main/en/index) library. Reasoning mode (ON/OFF) is controlled via the system prompt. Please see the example below.
+
+ We recommend using the *transformers* package with version 4.48.3.
+ Example of reasoning on:
+
+ ```py
+ import torch
+ import transformers
+
+ model_id = "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1"
+ model_kwargs = {"torch_dtype": torch.bfloat16, "trust_remote_code": True, "device_map": "auto"}
+ tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
+ tokenizer.pad_token_id = tokenizer.eos_token_id
+
+ pipeline = transformers.pipeline(
+     "text-generation",
+     model=model_id,
+     tokenizer=tokenizer,
+     max_new_tokens=32768,
+     temperature=0.6,
+     top_p=0.95,
+     **model_kwargs
+ )
+
+ thinking = "on"
+
+ print(pipeline([{"role": "system", "content": f"detailed thinking {thinking}"}, {"role": "user", "content": "Solve x*(sin(x)+2)=0"}]))
+ ```
+
+ Example of reasoning off:
+
+ ```py
+ import torch
+ import transformers
+
+ model_id = "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1"
+ model_kwargs = {"torch_dtype": torch.bfloat16, "trust_remote_code": True, "device_map": "auto"}
+ tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
+ tokenizer.pad_token_id = tokenizer.eos_token_id
+
+ pipeline = transformers.pipeline(
+     "text-generation",
+     model=model_id,
+     tokenizer=tokenizer,
+     max_new_tokens=32768,
+     do_sample=False,
+     **model_kwargs
+ )
+
+ thinking = "off"
+
+ print(pipeline([{"role": "system", "content": f"detailed thinking {thinking}"}, {"role": "user", "content": "Solve x*(sin(x)+2)=0"}]))
+ ```
+
+ ### Use It with vLLM
+
+ ```
+ pip install vllm==0.8.3
+ ```
+ An example of how to serve with vLLM:
+ ```
+ python3 -m vllm.entrypoints.openai.api_server \
+     --model "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1" \
+     --trust-remote-code \
+     --seed=1 \
+     --host="0.0.0.0" \
+     --port=5000 \
+     --served-model-name "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1" \
+     --tensor-parallel-size=8 \
+     --max-model-len=32768 \
+     --gpu-memory-utilization 0.95 \
+     --enforce-eager
+ ```
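Once the server above is running, vLLM exposes an OpenAI-compatible endpoint on port 5000. The snippet below is a minimal client sketch (not from the original card) using the `openai` Python package; the base URL, placeholder API key, and the `detailed thinking on` system prompt follow the serving command and the usage recommendations above.

```py
from openai import OpenAI

# Points at the vLLM server started above; the API key is unused by vLLM but required by the client.
client = OpenAI(base_url="http://0.0.0.0:5000/v1", api_key="dummy")

response = client.chat.completions.create(
    model="nvidia/Llama-3_1-Nemotron-Ultra-253B-v1",
    messages=[
        {"role": "system", "content": "detailed thinking on"},
        {"role": "user", "content": "Solve x*(sin(x)+2)=0"},
    ],
    temperature=0.6,  # recommended for Reasoning ON mode
    top_p=0.95,
    max_tokens=32768,
)
print(response.choices[0].message.content)
```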
+ ## Inference:
+ **Engine:**
+
+ - Transformers
+
+ **Test Hardware:**
+ - BF16:
+   - 8x NVIDIA H100-80GB
+   - 4x NVIDIA B100
+ - FP8:
+   - 4x NVIDIA H100-80GB
+
+ ## Training and Evaluation Datasets
+
+ ## Training Datasets
+
+ A large variety of training data was used for the knowledge distillation phase before the post-training pipeline, three of which included: FineWeb, Buzz-V1.2, and Dolma.
+
+ The data for the multi-stage post-training phases is a compilation of SFT and RL data that supports improvements of the math, code, general reasoning, and instruction-following capabilities of the original Llama instruct model.
+
+ Prompts have been sourced either from public and open corpora or synthetically generated. Responses were synthetically generated by a variety of models, with some prompts containing responses for both reasoning on and off modes, to train the model to distinguish between the two modes. This model was improved with Qwen.
+
+ We have released our [Llama-Nemotron-Post-Training-Dataset](https://huggingface.co/datasets/nvidia/Llama-Nemotron-Post-Training-Dataset) to promote openness and transparency in model development and improvement.
+
+ **Data Collection for Training Datasets:**
+
+ - Hybrid: Automated, Human, Synthetic
+
+ **Data Labeling for Training Datasets:**
+
+ - Hybrid: Automated, Human, Synthetic
+
+ ## Evaluation Datasets
+
+ We used the datasets listed in the next section to evaluate Llama-3.1-Nemotron-Ultra-253B-v1.
+
+ Data Collection for Evaluation Datasets:
+
+ - Hybrid: Human/Synthetic
+
+ Data Labeling for Evaluation Datasets:
+
+ - Hybrid: Human/Synthetic/Automatic
+
+ ## Evaluation Results
+ *These results contain both Reasoning On and Reasoning Off. We recommend using temperature=`0.6`, top_p=`0.95` for Reasoning On mode, and greedy decoding for Reasoning Off mode. All evaluations are done with a 32k sequence length. We run the benchmarks up to 16 times and average the scores to be more accurate.*
+
+ > NOTE: Where applicable, a Prompt Template will be provided. While completing benchmarks, please ensure that you are parsing for the correct output format as per the provided prompt in order to reproduce the benchmarks seen below.
+
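Several of the templates below (GPQA, AIME25, MATH500) ask the model to place its final answer inside `\boxed{}`. A small helper like the following sketch (an illustrative example, not part of any official evaluation harness) can be used to pull that answer out of a completion; it scans for the last `\boxed{...}` span and handles nested braces.

```py
def extract_boxed(text: str) -> str | None:
    """Return the contents of the last \\boxed{...} in `text`, or None if absent."""
    marker = r"\boxed{"
    start = text.rfind(marker)
    if start == -1:
        return None
    i = start + len(marker)
    depth = 1
    out = []
    while i < len(text) and depth > 0:
        ch = text[i]
        if ch == "{":
            depth += 1
        elif ch == "}":
            depth -= 1
            if depth == 0:
                break
        out.append(ch)
        i += 1
    return "".join(out) if depth == 0 else None

# Example: nested braces inside the boxed answer are preserved.
assert extract_boxed(r"The answer is \boxed{\frac{1}{2}}.") == r"\frac{1}{2}"
```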
+ ### GPQA
+
+ | Reasoning Mode | pass@1 |
+ |--------------|------------|
+ | Reasoning Off | 56.60 |
+ | Reasoning On | 76.01 |
+
+ User Prompt Template:
+
+ ```
+ "What is the correct answer to this question: {question}\nChoices:\nA. {option_A}\nB. {option_B}\nC. {option_C}\nD. {option_D}\nLet's think step by step, and put the final answer (should be a single letter A, B, C, or D) into a \boxed{}"
+ ```
+
+ ### AIME25
+
+ | Reasoning Mode | pass@1 |
+ |--------------|------------|
+ | Reasoning Off | 16.67 |
+ | Reasoning On | 72.50 |
+
+ User Prompt Template:
+
+ ```
+ "Below is a math question. I want you to reason through the steps and then give a final answer. Your final answer should be in \boxed{}.\nQuestion: {question}"
+ ```
+
+ ### BFCL V2 Live
+
+ | Reasoning Mode | Score |
+ |--------------|------------|
+ | Reasoning Off | 73.62 |
+ | Reasoning On | 74.10 |
+
+ User Prompt Template:
+
+ ```
+ You are an expert in composing functions. You are given a question and a set of possible functions.
+ Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
+ If none of the function can be used, point it out. If the given question lacks the parameters required by the function,
+ also point it out. You should only return the function call in tools call sections.
+
+ If you decide to invoke any of the function(s), you MUST put it in the format of <TOOLCALL>[func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]</TOOLCALL>
+
+ You SHOULD NOT include any other text in the response.
+ Here is a list of functions in JSON format that you can invoke.
+
+ <AVAILABLE_TOOLS>{functions}</AVAILABLE_TOOLS>
+
+ {user_prompt}
+ ```
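The template above instructs the model to wrap calls in `<TOOLCALL>...</TOOLCALL>`. A hedged parsing sketch (again illustrative, not the benchmark's own harness) for recovering that section from a completion:

```py
import re

def extract_toolcall(completion: str) -> str | None:
    """Return the text between <TOOLCALL> and </TOOLCALL>, or None if the model emitted no call."""
    match = re.search(r"<TOOLCALL>(.*?)</TOOLCALL>", completion, flags=re.DOTALL)
    return match.group(1).strip() if match else None

print(extract_toolcall("<TOOLCALL>[get_weather(city='Paris')]</TOOLCALL>"))
# [get_weather(city='Paris')]
```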
+
+ ### LiveCodeBench (20240801-20250201)
+
+ | Reasoning Mode | pass@1 |
+ |--------------|------------|
+ | Reasoning Off | 29.03 |
+ | Reasoning On | 66.31 |
+
+ User Prompt Template (without starter code):
+
+ ````
+ "You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests.
+
+ Question: {prompt}
+
+ Read the inputs from stdin solve the problem and write the answer to stdout (do not directly test on the sample inputs). Enclose your code within delimiters as follows. Ensure that when the python program runs, it reads the inputs, runs the algorithm and writes output to STDOUT.
+ ```python
+ # YOUR CODE HERE
+ ```
+ ````
+
+ User Prompt Template (with starter code):
+
+ ````
+ You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests.
+
+ Question: {prompt}
+
+ You will use the following starter code to write the solution to the problem and enclose your code within delimiters.
+ ```python
+ {starter_code}
+ ```
+ ````
+
+ ### IFEval
+
+ | Reasoning Mode | Strict:Instruction |
+ |--------------|------------|
+ | Reasoning Off | 88.85 |
+ | Reasoning On | 89.45 |
+
+ ### MATH500
+
+ | Reasoning Mode | pass@1 |
+ |--------------|------------|
+ | Reasoning Off | 80.40 |
+ | Reasoning On | 97.00 |
+
+ User Prompt Template:
+
+ ```
+ "Below is a math question. I want you to reason through the steps and then give a final answer. Your final answer should be in \boxed{}.\nQuestion: {question}"
+ ```
+
+ ### JudgeBench
+
+ | Reasoning Mode | Knowledge Score | Reasoning Score | Math Score | Coding Score | Overall Score |
+ |--------------|------------|------------|------------|------------|------------|
+ | Reasoning On | 70.13 | 81.63 | 89.29 | 92.86 | 79.14 |
+
+ ## Ethical Considerations:
+
+ NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse.
+
+ For more detailed information on ethical considerations for this model, please see the Model Card++ [Explainability](./EXPLAINABILITY.md), [Bias](./BIAS.md), [Safety & Security](./SAFETY_and_SECURITY.md), and [Privacy](./PRIVACY.md) Subcards.
+
+ Please report security vulnerabilities or NVIDIA AI Concerns [here](https://www.nvidia.com/en-us/support/submit-security-vulnerability/).
SAFETY_and_SECURITY.md ADDED
@@ -0,0 +1,8 @@
+ # **Safety & Security**
+
+ |Field:|Response:|
+ |:---:|:---:|
+ |Model Application(s):|Chat, Instruction Following, Chatbot Development, Code Generation, Reasoning|
+ |Describe life critical application (if present):|None Known (please see referenced Known Risks in the Explainability subcard).|
+ |Use Case Restrictions:|Abide by the [NVIDIA Open Model License](https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/). Additional Information: [Llama 3.1 Community License Agreement](https://www.llama.com/llama3_1/license/). Built with Llama.|
+ |Model and Dataset Restrictions:|The Principle of Least Privilege (PoLP) is applied, limiting access for dataset generation. Restrictions enforce dataset access during training, and dataset license constraints are adhered to. Model checkpoints are made available on Hugging Face and NGC, and may become available on cloud providers' model catalogs.|
__init__.py ADDED
File without changes
accuracy_plot.png ADDED

Git LFS Details

  • SHA256: ded932e846454a7691f2e0cc2c916132731eba4df00f488b7bfaf7bc359b2d60
  • Pointer size: 132 Bytes
  • Size of remote file: 1.6 MB
block_config.py ADDED
@@ -0,0 +1,118 @@
+ import dataclasses
+ import json
+ import warnings
+ from dataclasses import dataclass, MISSING
+ from functools import partial
+ from typing import Optional, Any
+
+
+ @partial(dataclass, frozen=True, kw_only=True)
+ class JsonComparable:
+     def to_json(self) -> str:
+         return json.dumps(dataclasses.asdict(self))
+
+     def __eq__(self, other: "JsonComparable") -> bool:
+         return self.to_json() == other.to_json()
+
+     def __hash__(self) -> int:
+         return hash(self.to_json())
+
+     def __lt__(self, other: "JsonComparable") -> bool:
+         return self.to_json() < other.to_json()
+
+
+ @partial(dataclass, frozen=True, kw_only=True)
+ class SubblockConfig(JsonComparable):
+     no_op: bool = False
+     replace_with_linear: bool = False
+     sparsify: Optional[list[str]] = None
+
+     def __post_init__(self):
+         assert not (self.no_op and self.replace_with_linear)
+
+     def _force_setattr(self, name: str, value: Any) -> None:
+         """
+         Set an attribute even in frozen dataclasses.
+         Use only inside __post_init__!
+         """
+         object.__setattr__(self, name, value)
+
+
+ @partial(dataclass, frozen=True, kw_only=True)
+ class AttentionConfig(SubblockConfig):
+     n_heads_in_group: Optional[int] = None
+     window_length: Optional[int] = None
+     num_sink_tokens: Optional[int] = None
+     use_prefill_window_in_sink_attention: bool = False
+     unshifted_sink: bool = False
+
+     def __post_init__(self):
+         super().__post_init__()
+         assert not (self.no_op and self.replace_with_linear)
+
+         if self.no_op or self.replace_with_linear:
+             for irrelevant_att in ["n_heads_in_group", "window_length", "num_sink_tokens"]:
+                 self._force_setattr(irrelevant_att, None)
+         else:
+             assert self.n_heads_in_group is not None
+
+         if self.is_sink:
+             assert not (self.unshifted_sink and self.use_prefill_window_in_sink_attention), \
+                 ("Unshifted sink uses its own kind of explicit masking, not standard window. "
+                  "Set use_prefill_window_in_sink_attention to False.")
+             assert not (self.num_sink_tokens == 0 and not self.unshifted_sink), \
+                 "Fake sink attention with 0 sink tokens is only supported with unshifted_sink=True"
+
+     @property
+     def prefill_sliding_window(self) -> Optional[int]:
+         if self.window_length is not None:
+             if not self.is_sink or self.use_prefill_window_in_sink_attention:
+                 return self.window_length
+         return None
+
+     @property
+     def is_sliding(self) -> bool:
+         return self.prefill_sliding_window is not None
+
+     @property
+     def is_sink(self) -> bool:
+         return (
+             (self.window_length is not None)
+             and
+             (self.num_sink_tokens is not None)
+         )
+
+
+ @partial(dataclass, frozen=True, kw_only=True)
+ class FFNConfig(SubblockConfig):
+     ffn_mult: Optional[float] = None
+
+     def __post_init__(self):
+         super().__post_init__()
+         if self.no_op or self.replace_with_linear:
+             self._force_setattr("ffn_mult", None)
+         else:
+             assert self.ffn_mult is not None
+             self._force_setattr("ffn_mult", round(self.ffn_mult, 6))
+
+
+ @partial(dataclass, frozen=True, kw_only=True)
+ class BlockConfig(JsonComparable):
+     attention: AttentionConfig = MISSING
+     ffn: FFNConfig = MISSING
+
+     def __post_init__(self):
+         """
+         Init subblock dataclasses from dicts
+         """
+         for subblock_name in dataclasses.fields(self):
+             subblock_config = getattr(self, subblock_name.name)
+             if isinstance(subblock_config, dict):
+                 subblock_fields = [field.name for field in dataclasses.fields(subblock_name.type)]
+                 unsupported_fields = [field_name for field_name in subblock_config.keys()
+                                       if field_name not in subblock_fields]
+                 if len(unsupported_fields) > 0:
+                     warnings.warn(f"Removed unsupported fields {unsupported_fields} from {subblock_name.type.__name__}")
+                 subblock_config = {k: v for k, v in subblock_config.items() if k not in unsupported_fields}
+                 object.__setattr__(self, subblock_name.name,
+                                    subblock_name.type(**subblock_config))  # __setattr__ to overcome frozen=True
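As a quick illustration of how entries in `config.json` map onto these dataclasses, the sketch below builds one `BlockConfig` from plain dicts of the same shape used in the `block_configs` list. The field values are taken from one of the blocks in this repository's config; the snippet itself is illustrative rather than part of the shipped code, and it assumes it is run from the repository root so that `block_config.py` is importable.

```py
from block_config import BlockConfig

# One entry from config.json's "block_configs": a regular attention block with
# 16 heads per group and an FFN with a 4.875 expansion multiplier.
block = BlockConfig(
    attention={"n_heads_in_group": 16, "no_op": False, "replace_with_linear": False},
    ffn={"ffn_mult": 4.875, "no_op": False, "replace_with_linear": False},
)
print(block.attention.n_heads_in_group)  # 16
print(block.ffn.ffn_mult)                # 4.875
print(block.to_json())                   # serialized form, as used for comparison/hashing
```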
config.json ADDED
@@ -0,0 +1,2972 @@
1
+ {
2
+ "_name_or_path": "llama_nemotron_ultra",
3
+ "architectures": [
4
+ "DeciLMForCausalLM"
5
+ ],
6
+ "attention_bias": false,
7
+ "attention_dropout": 0.0,
8
+ "auto_map": {
9
+ "AutoConfig": "configuration_decilm.DeciLMConfig",
10
+ "AutoModelForCausalLM": "modeling_decilm.DeciLMForCausalLM"
11
+ },
12
+ "block_configs": [
13
+ {
14
+ "attention": {
15
+ "n_heads_in_group": 16,
16
+ "no_op": false,
17
+ "num_sink_tokens": null,
18
+ "replace_with_linear": false,
19
+ "sparsify": null,
20
+ "unshifted_sink": false,
21
+ "use_prefill_window_in_sink_attention": false,
22
+ "window_length": null
23
+ },
24
+ "ffn": {
25
+ "ffn_mult": 0.4875,
26
+ "no_op": false,
27
+ "replace_with_linear": false,
28
+ "sparsify": null
29
+ }
30
+ },
31
+ {
32
+ "attention": {
33
+ "n_heads_in_group": 16,
34
+ "no_op": false,
35
+ "num_sink_tokens": null,
36
+ "replace_with_linear": false,
37
+ "sparsify": null,
38
+ "unshifted_sink": false,
39
+ "use_prefill_window_in_sink_attention": false,
40
+ "window_length": null
41
+ },
42
+ "ffn": {
43
+ "ffn_mult": 0.975,
44
+ "no_op": false,
45
+ "replace_with_linear": false,
46
+ "sparsify": null
47
+ }
48
+ },
49
+ {
50
+ "attention": {
51
+ "n_heads_in_group": 16,
52
+ "no_op": false,
53
+ "num_sink_tokens": null,
54
+ "replace_with_linear": false,
55
+ "sparsify": null,
56
+ "unshifted_sink": false,
57
+ "use_prefill_window_in_sink_attention": false,
58
+ "window_length": null
59
+ },
60
+ "ffn": {
61
+ "ffn_mult": 1.4625,
62
+ "no_op": false,
63
+ "replace_with_linear": false,
64
+ "sparsify": null
65
+ }
66
+ },
67
+ {
68
+ "attention": {
69
+ "n_heads_in_group": 16,
70
+ "no_op": false,
71
+ "num_sink_tokens": null,
72
+ "replace_with_linear": false,
73
+ "sparsify": null,
74
+ "unshifted_sink": false,
75
+ "use_prefill_window_in_sink_attention": false,
76
+ "window_length": null
77
+ },
78
+ "ffn": {
79
+ "ffn_mult": 1.4625,
80
+ "no_op": false,
81
+ "replace_with_linear": false,
82
+ "sparsify": null
83
+ }
84
+ },
85
+ {
86
+ "attention": {
87
+ "n_heads_in_group": 16,
88
+ "no_op": false,
89
+ "num_sink_tokens": null,
90
+ "replace_with_linear": false,
91
+ "sparsify": null,
92
+ "unshifted_sink": false,
93
+ "use_prefill_window_in_sink_attention": false,
94
+ "window_length": null
95
+ },
96
+ "ffn": {
97
+ "ffn_mult": 1.4625,
98
+ "no_op": false,
99
+ "replace_with_linear": false,
100
+ "sparsify": null
101
+ }
102
+ },
103
+ {
104
+ "attention": {
105
+ "n_heads_in_group": 16,
106
+ "no_op": false,
107
+ "num_sink_tokens": null,
108
+ "replace_with_linear": false,
109
+ "sparsify": null,
110
+ "unshifted_sink": false,
111
+ "use_prefill_window_in_sink_attention": false,
112
+ "window_length": null
113
+ },
114
+ "ffn": {
115
+ "ffn_mult": 1.4625,
116
+ "no_op": false,
117
+ "replace_with_linear": false,
118
+ "sparsify": null
119
+ }
120
+ },
121
+ {
122
+ "attention": {
123
+ "n_heads_in_group": 16,
124
+ "no_op": false,
125
+ "num_sink_tokens": null,
126
+ "replace_with_linear": false,
127
+ "sparsify": null,
128
+ "unshifted_sink": false,
129
+ "use_prefill_window_in_sink_attention": false,
130
+ "window_length": null
131
+ },
132
+ "ffn": {
133
+ "ffn_mult": 1.4625,
134
+ "no_op": false,
135
+ "replace_with_linear": false,
136
+ "sparsify": null
137
+ }
138
+ },
139
+ {
140
+ "attention": {
141
+ "n_heads_in_group": 16,
142
+ "no_op": false,
143
+ "num_sink_tokens": null,
144
+ "replace_with_linear": false,
145
+ "sparsify": null,
146
+ "unshifted_sink": false,
147
+ "use_prefill_window_in_sink_attention": false,
148
+ "window_length": null
149
+ },
150
+ "ffn": {
151
+ "ffn_mult": 1.4625,
152
+ "no_op": false,
153
+ "replace_with_linear": false,
154
+ "sparsify": null
155
+ }
156
+ },
157
+ {
158
+ "attention": {
159
+ "n_heads_in_group": 16,
160
+ "no_op": false,
161
+ "num_sink_tokens": null,
162
+ "replace_with_linear": false,
163
+ "sparsify": null,
164
+ "unshifted_sink": false,
165
+ "use_prefill_window_in_sink_attention": false,
166
+ "window_length": null
167
+ },
168
+ "ffn": {
169
+ "ffn_mult": 1.95,
170
+ "no_op": false,
171
+ "replace_with_linear": false,
172
+ "sparsify": null
173
+ }
174
+ },
175
+ {
176
+ "attention": {
177
+ "n_heads_in_group": null,
178
+ "no_op": true,
179
+ "num_sink_tokens": null,
180
+ "replace_with_linear": false,
181
+ "sparsify": null,
182
+ "unshifted_sink": false,
183
+ "use_prefill_window_in_sink_attention": false,
184
+ "window_length": null
185
+ },
186
+ "ffn": {
187
+ "ffn_mult": null,
188
+ "no_op": true,
189
+ "replace_with_linear": false,
190
+ "sparsify": null
191
+ }
192
+ },
193
+ {
194
+ "attention": {
195
+ "n_heads_in_group": null,
196
+ "no_op": true,
197
+ "num_sink_tokens": null,
198
+ "replace_with_linear": false,
199
+ "sparsify": null,
200
+ "unshifted_sink": false,
201
+ "use_prefill_window_in_sink_attention": false,
202
+ "window_length": null
203
+ },
204
+ "ffn": {
205
+ "ffn_mult": null,
206
+ "no_op": true,
207
+ "replace_with_linear": false,
208
+ "sparsify": null
209
+ }
210
+ },
211
+ {
212
+ "attention": {
213
+ "n_heads_in_group": null,
214
+ "no_op": true,
215
+ "num_sink_tokens": null,
216
+ "replace_with_linear": false,
217
+ "sparsify": null,
218
+ "unshifted_sink": false,
219
+ "use_prefill_window_in_sink_attention": false,
220
+ "window_length": null
221
+ },
222
+ "ffn": {
223
+ "ffn_mult": null,
224
+ "no_op": true,
225
+ "replace_with_linear": false,
226
+ "sparsify": null
227
+ }
228
+ },
229
+ {
230
+ "attention": {
231
+ "n_heads_in_group": null,
232
+ "no_op": true,
233
+ "num_sink_tokens": null,
234
+ "replace_with_linear": false,
235
+ "sparsify": null,
236
+ "unshifted_sink": false,
237
+ "use_prefill_window_in_sink_attention": false,
238
+ "window_length": null
239
+ },
240
+ "ffn": {
241
+ "ffn_mult": null,
242
+ "no_op": true,
243
+ "replace_with_linear": false,
244
+ "sparsify": null
245
+ }
246
+ },
247
+ {
248
+ "attention": {
249
+ "n_heads_in_group": 16,
250
+ "no_op": false,
251
+ "num_sink_tokens": null,
252
+ "replace_with_linear": false,
253
+ "sparsify": null,
254
+ "unshifted_sink": false,
255
+ "use_prefill_window_in_sink_attention": false,
256
+ "window_length": null
257
+ },
258
+ "ffn": {
259
+ "ffn_mult": 1.95,
260
+ "no_op": false,
261
+ "replace_with_linear": false,
262
+ "sparsify": null
263
+ }
264
+ },
265
+ {
266
+ "attention": {
267
+ "n_heads_in_group": 16,
268
+ "no_op": false,
269
+ "num_sink_tokens": null,
270
+ "replace_with_linear": false,
271
+ "sparsify": null,
272
+ "unshifted_sink": false,
273
+ "use_prefill_window_in_sink_attention": false,
274
+ "window_length": null
275
+ },
276
+ "ffn": {
277
+ "ffn_mult": 1.95,
278
+ "no_op": false,
279
+ "replace_with_linear": false,
280
+ "sparsify": null
281
+ }
282
+ },
283
+ {
284
+ "attention": {
285
+ "n_heads_in_group": 16,
286
+ "no_op": false,
287
+ "num_sink_tokens": null,
288
+ "replace_with_linear": false,
289
+ "sparsify": null,
290
+ "unshifted_sink": false,
291
+ "use_prefill_window_in_sink_attention": false,
292
+ "window_length": null
293
+ },
294
+ "ffn": {
295
+ "ffn_mult": 1.95,
296
+ "no_op": false,
297
+ "replace_with_linear": false,
298
+ "sparsify": null
299
+ }
300
+ },
301
+ {
302
+ "attention": {
303
+ "n_heads_in_group": 16,
304
+ "no_op": false,
305
+ "num_sink_tokens": null,
306
+ "replace_with_linear": false,
307
+ "sparsify": null,
308
+ "unshifted_sink": false,
309
+ "use_prefill_window_in_sink_attention": false,
310
+ "window_length": null
311
+ },
312
+ "ffn": {
313
+ "ffn_mult": 4.875,
314
+ "no_op": false,
315
+ "replace_with_linear": false,
316
+ "sparsify": null
317
+ }
318
+ },
319
+ {
320
+ "attention": {
321
+ "n_heads_in_group": 16,
322
+ "no_op": false,
323
+ "num_sink_tokens": null,
324
+ "replace_with_linear": false,
325
+ "sparsify": null,
326
+ "unshifted_sink": false,
327
+ "use_prefill_window_in_sink_attention": false,
328
+ "window_length": null
329
+ },
330
+ "ffn": {
331
+ "ffn_mult": 4.875,
332
+ "no_op": false,
333
+ "replace_with_linear": false,
334
+ "sparsify": null
335
+ }
336
+ },
337
+ {
338
+ "attention": {
339
+ "n_heads_in_group": null,
340
+ "no_op": true,
341
+ "num_sink_tokens": null,
342
+ "replace_with_linear": false,
343
+ "sparsify": null,
344
+ "unshifted_sink": false,
345
+ "use_prefill_window_in_sink_attention": false,
346
+ "window_length": null
347
+ },
348
+ "ffn": {
349
+ "ffn_mult": null,
350
+ "no_op": true,
351
+ "replace_with_linear": false,
352
+ "sparsify": null
353
+ }
354
+ },
355
+ {
356
+ "attention": {
357
+ "n_heads_in_group": null,
358
+ "no_op": true,
359
+ "num_sink_tokens": null,
360
+ "replace_with_linear": false,
361
+ "sparsify": null,
362
+ "unshifted_sink": false,
363
+ "use_prefill_window_in_sink_attention": false,
364
+ "window_length": null
365
+ },
366
+ "ffn": {
367
+ "ffn_mult": null,
368
+ "no_op": true,
369
+ "replace_with_linear": false,
370
+ "sparsify": null
371
+ }
372
+ },
373
+ {
374
+ "attention": {
375
+ "n_heads_in_group": null,
376
+ "no_op": true,
377
+ "num_sink_tokens": null,
378
+ "replace_with_linear": false,
379
+ "sparsify": null,
380
+ "unshifted_sink": false,
381
+ "use_prefill_window_in_sink_attention": false,
382
+ "window_length": null
383
+ },
384
+ "ffn": {
385
+ "ffn_mult": null,
386
+ "no_op": true,
387
+ "replace_with_linear": false,
388
+ "sparsify": null
389
+ }
390
+ },
391
+ {
392
+ "attention": {
393
+ "n_heads_in_group": null,
394
+ "no_op": true,
395
+ "num_sink_tokens": null,
396
+ "replace_with_linear": false,
397
+ "sparsify": null,
398
+ "unshifted_sink": false,
399
+ "use_prefill_window_in_sink_attention": false,
400
+ "window_length": null
401
+ },
402
+ "ffn": {
403
+ "ffn_mult": null,
404
+ "no_op": true,
405
+ "replace_with_linear": false,
406
+ "sparsify": null
407
+ }
408
+ },
409
+ {
410
+ "attention": {
411
+ "n_heads_in_group": null,
412
+ "no_op": true,
413
+ "num_sink_tokens": null,
414
+ "replace_with_linear": false,
415
+ "sparsify": null,
416
+ "unshifted_sink": false,
417
+ "use_prefill_window_in_sink_attention": false,
418
+ "window_length": null
419
+ },
420
+ "ffn": {
421
+ "ffn_mult": null,
422
+ "no_op": true,
423
+ "replace_with_linear": false,
424
+ "sparsify": null
425
+ }
426
+ },
427
+ {
428
+ "attention": {
429
+ "n_heads_in_group": null,
430
+ "no_op": true,
431
+ "num_sink_tokens": null,
432
+ "replace_with_linear": false,
433
+ "sparsify": null,
434
+ "unshifted_sink": false,
435
+ "use_prefill_window_in_sink_attention": false,
436
+ "window_length": null
437
+ },
438
+ "ffn": {
439
+ "ffn_mult": null,
440
+ "no_op": true,
441
+ "replace_with_linear": false,
442
+ "sparsify": null
443
+ }
444
+ },
445
+ {
446
+ "attention": {
447
+ "n_heads_in_group": 16,
448
+ "no_op": false,
449
+ "num_sink_tokens": null,
450
+ "replace_with_linear": false,
451
+ "sparsify": null,
452
+ "unshifted_sink": false,
453
+ "use_prefill_window_in_sink_attention": false,
454
+ "window_length": null
455
+ },
456
+ "ffn": {
457
+ "ffn_mult": 4.875,
458
+ "no_op": false,
459
+ "replace_with_linear": false,
460
+ "sparsify": null
461
+ }
462
+ },
463
+ {
464
+ "attention": {
465
+ "n_heads_in_group": 16,
466
+ "no_op": false,
467
+ "num_sink_tokens": null,
468
+ "replace_with_linear": false,
469
+ "sparsify": null,
470
+ "unshifted_sink": false,
471
+ "use_prefill_window_in_sink_attention": false,
472
+ "window_length": null
473
+ },
474
+ "ffn": {
475
+ "ffn_mult": 4.875,
476
+ "no_op": false,
477
+ "replace_with_linear": false,
478
+ "sparsify": null
479
+ }
480
+ },
481
+ {
482
+ "attention": {
483
+ "n_heads_in_group": 16,
484
+ "no_op": false,
485
+ "num_sink_tokens": null,
486
+ "replace_with_linear": false,
487
+ "sparsify": null,
488
+ "unshifted_sink": false,
489
+ "use_prefill_window_in_sink_attention": false,
490
+ "window_length": null
491
+ },
492
+ "ffn": {
493
+ "ffn_mult": 4.875,
494
+ "no_op": false,
495
+ "replace_with_linear": false,
496
+ "sparsify": null
497
+ }
498
+ },
499
+ {
500
+ "attention": {
501
+ "n_heads_in_group": null,
502
+ "no_op": true,
503
+ "num_sink_tokens": null,
504
+ "replace_with_linear": false,
505
+ "sparsify": null,
506
+ "unshifted_sink": false,
507
+ "use_prefill_window_in_sink_attention": false,
508
+ "window_length": null
509
+ },
510
+ "ffn": {
511
+ "ffn_mult": null,
512
+ "no_op": true,
513
+ "replace_with_linear": false,
514
+ "sparsify": null
515
+ }
516
+ },
517
+ {
518
+ "attention": {
519
+ "n_heads_in_group": null,
520
+ "no_op": true,
521
+ "num_sink_tokens": null,
522
+ "replace_with_linear": false,
523
+ "sparsify": null,
524
+ "unshifted_sink": false,
525
+ "use_prefill_window_in_sink_attention": false,
526
+ "window_length": null
527
+ },
528
+ "ffn": {
529
+ "ffn_mult": null,
530
+ "no_op": true,
531
+ "replace_with_linear": false,
532
+ "sparsify": null
533
+ }
534
+ },
535
+ {
536
+ "attention": {
537
+ "n_heads_in_group": null,
538
+ "no_op": true,
539
+ "num_sink_tokens": null,
540
+ "replace_with_linear": false,
541
+ "sparsify": null,
542
+ "unshifted_sink": false,
543
+ "use_prefill_window_in_sink_attention": false,
544
+ "window_length": null
545
+ },
546
+ "ffn": {
547
+ "ffn_mult": null,
548
+ "no_op": true,
549
+ "replace_with_linear": false,
550
+ "sparsify": null
551
+ }
552
+ },
553
+ {
554
+ "attention": {
555
+ "n_heads_in_group": null,
556
+ "no_op": true,
557
+ "num_sink_tokens": null,
558
+ "replace_with_linear": false,
559
+ "sparsify": null,
560
+ "unshifted_sink": false,
561
+ "use_prefill_window_in_sink_attention": false,
562
+ "window_length": null
563
+ },
564
+ "ffn": {
565
+ "ffn_mult": null,
566
+ "no_op": true,
567
+ "replace_with_linear": false,
568
+ "sparsify": null
569
+ }
570
+ },
571
+ {
572
+ "attention": {
573
+ "n_heads_in_group": null,
574
+ "no_op": true,
575
+ "num_sink_tokens": null,
576
+ "replace_with_linear": false,
577
+ "sparsify": null,
578
+ "unshifted_sink": false,
579
+ "use_prefill_window_in_sink_attention": false,
580
+ "window_length": null
581
+ },
582
+ "ffn": {
583
+ "ffn_mult": null,
584
+ "no_op": true,
585
+ "replace_with_linear": false,
586
+ "sparsify": null
587
+ }
588
+ },
589
+ {
590
+ "attention": {
591
+ "n_heads_in_group": 16,
592
+ "no_op": false,
593
+ "num_sink_tokens": null,
594
+ "replace_with_linear": false,
595
+ "sparsify": null,
596
+ "unshifted_sink": false,
597
+ "use_prefill_window_in_sink_attention": false,
598
+ "window_length": null
599
+ },
600
+ "ffn": {
601
+ "ffn_mult": 4.875,
602
+ "no_op": false,
603
+ "replace_with_linear": false,
604
+ "sparsify": null
605
+ }
606
+ },
607
+ {
608
+ "attention": {
609
+ "n_heads_in_group": 16,
610
+ "no_op": false,
611
+ "num_sink_tokens": null,
612
+ "replace_with_linear": false,
613
+ "sparsify": null,
614
+ "unshifted_sink": false,
615
+ "use_prefill_window_in_sink_attention": false,
616
+ "window_length": null
617
+ },
618
+ "ffn": {
619
+ "ffn_mult": 4.875,
620
+ "no_op": false,
621
+ "replace_with_linear": false,
622
+ "sparsify": null
623
+ }
624
+ },
625
+ {
626
+ "attention": {
627
+ "n_heads_in_group": 16,
628
+ "no_op": false,
629
+ "num_sink_tokens": null,
630
+ "replace_with_linear": false,
631
+ "sparsify": null,
632
+ "unshifted_sink": false,
633
+ "use_prefill_window_in_sink_attention": false,
634
+ "window_length": null
635
+ },
636
+ "ffn": {
637
+ "ffn_mult": 4.875,
638
+ "no_op": false,
639
+ "replace_with_linear": false,
640
+ "sparsify": null
641
+ }
642
+ },
643
+ {
644
+ "attention": {
645
+ "n_heads_in_group": 16,
646
+ "no_op": false,
647
+ "num_sink_tokens": null,
648
+ "replace_with_linear": false,
649
+ "sparsify": null,
650
+ "unshifted_sink": false,
651
+ "use_prefill_window_in_sink_attention": false,
652
+ "window_length": null
653
+ },
654
+ "ffn": {
655
+ "ffn_mult": 2.4375,
656
+ "no_op": false,
657
+ "replace_with_linear": false,
658
+ "sparsify": null
659
+ }
660
+ },
661
+ {
662
+ "attention": {
663
+ "n_heads_in_group": null,
664
+ "no_op": true,
665
+ "num_sink_tokens": null,
666
+ "replace_with_linear": false,
667
+ "sparsify": null,
668
+ "unshifted_sink": false,
669
+ "use_prefill_window_in_sink_attention": false,
670
+ "window_length": null
671
+ },
672
+ "ffn": {
673
+ "ffn_mult": null,
674
+ "no_op": true,
675
+ "replace_with_linear": false,
676
+ "sparsify": null
677
+ }
678
+ },
679
+ {
680
+ "attention": {
681
+ "n_heads_in_group": null,
682
+ "no_op": true,
683
+ "num_sink_tokens": null,
684
+ "replace_with_linear": false,
685
+ "sparsify": null,
686
+ "unshifted_sink": false,
687
+ "use_prefill_window_in_sink_attention": false,
688
+ "window_length": null
689
+ },
690
+ "ffn": {
691
+ "ffn_mult": null,
692
+ "no_op": true,
693
+ "replace_with_linear": false,
694
+ "sparsify": null
695
+ }
696
+ },
697
+ {
698
+ "attention": {
699
+ "n_heads_in_group": null,
700
+ "no_op": true,
701
+ "num_sink_tokens": null,
702
+ "replace_with_linear": false,
703
+ "sparsify": null,
704
+ "unshifted_sink": false,
705
+ "use_prefill_window_in_sink_attention": false,
706
+ "window_length": null
707
+ },
708
+ "ffn": {
709
+ "ffn_mult": null,
710
+ "no_op": true,
711
+ "replace_with_linear": false,
712
+ "sparsify": null
713
+ }
714
+ },
715
+ {
716
+ "attention": {
717
+ "n_heads_in_group": 16,
718
+ "no_op": false,
719
+ "num_sink_tokens": null,
720
+ "replace_with_linear": false,
721
+ "sparsify": null,
722
+ "unshifted_sink": false,
723
+ "use_prefill_window_in_sink_attention": false,
724
+ "window_length": null
725
+ },
726
+ "ffn": {
727
+ "ffn_mult": 1.95,
728
+ "no_op": false,
729
+ "replace_with_linear": false,
730
+ "sparsify": null
731
+ }
732
+ },
733
+ {
734
+ "attention": {
735
+ "n_heads_in_group": 16,
736
+ "no_op": false,
737
+ "num_sink_tokens": null,
738
+ "replace_with_linear": false,
739
+ "sparsify": null,
740
+ "unshifted_sink": false,
741
+ "use_prefill_window_in_sink_attention": false,
742
+ "window_length": null
743
+ },
744
+ "ffn": {
745
+ "ffn_mult": 1.95,
746
+ "no_op": false,
747
+ "replace_with_linear": false,
748
+ "sparsify": null
749
+ }
750
+ },
751
+ {
752
+ "attention": {
753
+ "n_heads_in_group": 16,
754
+ "no_op": false,
755
+ "num_sink_tokens": null,
756
+ "replace_with_linear": false,
757
+ "sparsify": null,
758
+ "unshifted_sink": false,
759
+ "use_prefill_window_in_sink_attention": false,
760
+ "window_length": null
761
+ },
762
+ "ffn": {
763
+ "ffn_mult": 1.95,
764
+ "no_op": false,
765
+ "replace_with_linear": false,
766
+ "sparsify": null
767
+ }
768
+ },
769
+ {
770
+ "attention": {
771
+ "n_heads_in_group": null,
772
+ "no_op": true,
773
+ "num_sink_tokens": null,
774
+ "replace_with_linear": false,
775
+ "sparsify": null,
776
+ "unshifted_sink": false,
777
+ "use_prefill_window_in_sink_attention": false,
778
+ "window_length": null
779
+ },
780
+ "ffn": {
781
+ "ffn_mult": 1.95,
782
+ "no_op": false,
783
+ "replace_with_linear": false,
784
+ "sparsify": null
785
+ }
786
+ },
787
+ {
788
+ "attention": {
789
+ "n_heads_in_group": 16,
790
+ "no_op": false,
791
+ "num_sink_tokens": null,
792
+ "replace_with_linear": false,
793
+ "sparsify": null,
794
+ "unshifted_sink": false,
795
+ "use_prefill_window_in_sink_attention": false,
796
+ "window_length": null
797
+ },
798
+ "ffn": {
799
+ "ffn_mult": 4.875,
800
+ "no_op": false,
801
+ "replace_with_linear": false,
802
+ "sparsify": null
803
+ }
804
+ },
805
+ {
806
+ "attention": {
807
+ "n_heads_in_group": null,
808
+ "no_op": true,
809
+ "num_sink_tokens": null,
810
+ "replace_with_linear": false,
811
+ "sparsify": null,
812
+ "unshifted_sink": false,
813
+ "use_prefill_window_in_sink_attention": false,
814
+ "window_length": null
815
+ },
816
+ "ffn": {
817
+ "ffn_mult": 4.875,
818
+ "no_op": false,
819
+ "replace_with_linear": false,
820
+ "sparsify": null
821
+ }
822
+ },
823
+ {
824
+ "attention": {
825
+ "n_heads_in_group": null,
826
+ "no_op": true,
827
+ "num_sink_tokens": null,
828
+ "replace_with_linear": false,
829
+ "sparsify": null,
830
+ "unshifted_sink": false,
831
+ "use_prefill_window_in_sink_attention": false,
832
+ "window_length": null
833
+ },
834
+ "ffn": {
835
+ "ffn_mult": null,
836
+ "no_op": true,
837
+ "replace_with_linear": false,
838
+ "sparsify": null
839
+ }
840
+ },
841
+ {
842
+ "attention": {
843
+ "n_heads_in_group": null,
844
+ "no_op": true,
845
+ "num_sink_tokens": null,
846
+ "replace_with_linear": false,
847
+ "sparsify": null,
848
+ "unshifted_sink": false,
849
+ "use_prefill_window_in_sink_attention": false,
850
+ "window_length": null
851
+ },
852
+ "ffn": {
853
+ "ffn_mult": null,
854
+ "no_op": true,
855
+ "replace_with_linear": false,
856
+ "sparsify": null
857
+ }
858
+ },
859
+ {
860
+ "attention": {
861
+ "n_heads_in_group": null,
862
+ "no_op": true,
863
+ "num_sink_tokens": null,
864
+ "replace_with_linear": false,
865
+ "sparsify": null,
866
+ "unshifted_sink": false,
867
+ "use_prefill_window_in_sink_attention": false,
868
+ "window_length": null
869
+ },
870
+ "ffn": {
871
+ "ffn_mult": null,
872
+ "no_op": true,
873
+ "replace_with_linear": false,
874
+ "sparsify": null
875
+ }
876
+ },
877
+ {
878
+ "attention": {
879
+ "n_heads_in_group": null,
880
+ "no_op": true,
881
+ "num_sink_tokens": null,
882
+ "replace_with_linear": false,
883
+ "sparsify": null,
884
+ "unshifted_sink": false,
885
+ "use_prefill_window_in_sink_attention": false,
886
+ "window_length": null
887
+ },
888
+ "ffn": {
889
+ "ffn_mult": null,
890
+ "no_op": true,
891
+ "replace_with_linear": false,
892
+ "sparsify": null
893
+ }
894
+ },
895
+ {
896
+ "attention": {
897
+ "n_heads_in_group": null,
898
+ "no_op": true,
899
+ "num_sink_tokens": null,
900
+ "replace_with_linear": false,
901
+ "sparsify": null,
902
+ "unshifted_sink": false,
903
+ "use_prefill_window_in_sink_attention": false,
904
+ "window_length": null
905
+ },
906
+ "ffn": {
907
+ "ffn_mult": null,
908
+ "no_op": true,
909
+ "replace_with_linear": false,
910
+ "sparsify": null
911
+ }
912
+ },
913
+ {
914
+ "attention": {
915
+ "n_heads_in_group": 16,
916
+ "no_op": false,
917
+ "num_sink_tokens": null,
918
+ "replace_with_linear": false,
919
+ "sparsify": null,
920
+ "unshifted_sink": false,
921
+ "use_prefill_window_in_sink_attention": false,
922
+ "window_length": null
923
+ },
924
+ "ffn": {
925
+ "ffn_mult": 4.875,
926
+ "no_op": false,
927
+ "replace_with_linear": false,
928
+ "sparsify": null
929
+ }
930
+ },
931
+ {
932
+ "attention": {
933
+ "n_heads_in_group": 16,
934
+ "no_op": false,
935
+ "num_sink_tokens": null,
936
+ "replace_with_linear": false,
937
+ "sparsify": null,
938
+ "unshifted_sink": false,
939
+ "use_prefill_window_in_sink_attention": false,
940
+ "window_length": null
941
+ },
942
+ "ffn": {
943
+ "ffn_mult": 4.875,
944
+ "no_op": false,
945
+ "replace_with_linear": false,
946
+ "sparsify": null
947
+ }
948
+ },
949
+ {
950
+ "attention": {
951
+ "n_heads_in_group": 16,
952
+ "no_op": false,
953
+ "num_sink_tokens": null,
954
+ "replace_with_linear": false,
955
+ "sparsify": null,
956
+ "unshifted_sink": false,
957
+ "use_prefill_window_in_sink_attention": false,
958
+ "window_length": null
959
+ },
960
+ "ffn": {
961
+ "ffn_mult": 4.875,
962
+ "no_op": false,
963
+ "replace_with_linear": false,
964
+ "sparsify": null
965
+ }
966
+ },
967
+ {
968
+ "attention": {
969
+ "n_heads_in_group": 16,
970
+ "no_op": false,
971
+ "num_sink_tokens": null,
972
+ "replace_with_linear": false,
973
+ "sparsify": null,
974
+ "unshifted_sink": false,
975
+ "use_prefill_window_in_sink_attention": false,
976
+ "window_length": null
977
+ },
978
+ "ffn": {
979
+ "ffn_mult": 4.875,
980
+ "no_op": false,
981
+ "replace_with_linear": false,
982
+ "sparsify": null
983
+ }
984
+ },
985
+ {
986
+ "attention": {
987
+ "n_heads_in_group": null,
988
+ "no_op": true,
989
+ "num_sink_tokens": null,
990
+ "replace_with_linear": false,
991
+ "sparsify": null,
992
+ "unshifted_sink": false,
993
+ "use_prefill_window_in_sink_attention": false,
994
+ "window_length": null
995
+ },
996
+ "ffn": {
997
+ "ffn_mult": null,
998
+ "no_op": true,
999
+ "replace_with_linear": false,
1000
+ "sparsify": null
1001
+ }
1002
+ },
1003
+ {
1004
+ "attention": {
1005
+ "n_heads_in_group": null,
1006
+ "no_op": true,
1007
+ "num_sink_tokens": null,
1008
+ "replace_with_linear": false,
1009
+ "sparsify": null,
1010
+ "unshifted_sink": false,
1011
+ "use_prefill_window_in_sink_attention": false,
1012
+ "window_length": null
1013
+ },
1014
+ "ffn": {
1015
+ "ffn_mult": null,
1016
+ "no_op": true,
1017
+ "replace_with_linear": false,
1018
+ "sparsify": null
1019
+ }
1020
+ },
1021
+ {
1022
+ "attention": {
1023
+ "n_heads_in_group": null,
1024
+ "no_op": true,
1025
+ "num_sink_tokens": null,
1026
+ "replace_with_linear": false,
1027
+ "sparsify": null,
1028
+ "unshifted_sink": false,
1029
+ "use_prefill_window_in_sink_attention": false,
1030
+ "window_length": null
1031
+ },
1032
+ "ffn": {
1033
+ "ffn_mult": null,
1034
+ "no_op": true,
1035
+ "replace_with_linear": false,
1036
+ "sparsify": null
1037
+ }
1038
+ },
1039
+ {
1040
+ "attention": {
1041
+ "n_heads_in_group": null,
1042
+ "no_op": true,
1043
+ "num_sink_tokens": null,
1044
+ "replace_with_linear": false,
1045
+ "sparsify": null,
1046
+ "unshifted_sink": false,
1047
+ "use_prefill_window_in_sink_attention": false,
1048
+ "window_length": null
1049
+ },
1050
+ "ffn": {
1051
+ "ffn_mult": null,
1052
+ "no_op": true,
1053
+ "replace_with_linear": false,
1054
+ "sparsify": null
1055
+ }
1056
+ },
1057
+ {
1058
+ "attention": {
1059
+ "n_heads_in_group": null,
1060
+ "no_op": true,
1061
+ "num_sink_tokens": null,
1062
+ "replace_with_linear": false,
1063
+ "sparsify": null,
1064
+ "unshifted_sink": false,
1065
+ "use_prefill_window_in_sink_attention": false,
1066
+ "window_length": null
1067
+ },
1068
+ "ffn": {
1069
+ "ffn_mult": null,
1070
+ "no_op": true,
1071
+ "replace_with_linear": false,
1072
+ "sparsify": null
1073
+ }
1074
+ },
1075
+ {
1076
+ "attention": {
1077
+ "n_heads_in_group": 16,
1078
+ "no_op": false,
1079
+ "num_sink_tokens": null,
1080
+ "replace_with_linear": false,
1081
+ "sparsify": null,
1082
+ "unshifted_sink": false,
1083
+ "use_prefill_window_in_sink_attention": false,
1084
+ "window_length": null
1085
+ },
1086
+ "ffn": {
1087
+ "ffn_mult": 4.875,
1088
+ "no_op": false,
1089
+ "replace_with_linear": false,
1090
+ "sparsify": null
1091
+ }
1092
+ },
1093
+ {
1094
+ "attention": {
1095
+ "n_heads_in_group": 16,
1096
+ "no_op": false,
1097
+ "num_sink_tokens": null,
1098
+ "replace_with_linear": false,
1099
+ "sparsify": null,
1100
+ "unshifted_sink": false,
1101
+ "use_prefill_window_in_sink_attention": false,
1102
+ "window_length": null
1103
+ },
1104
+ "ffn": {
1105
+ "ffn_mult": 4.875,
1106
+ "no_op": false,
1107
+ "replace_with_linear": false,
1108
+ "sparsify": null
1109
+ }
1110
+ },
1111
+ {
1112
+ "attention": {
1113
+ "n_heads_in_group": 16,
1114
+ "no_op": false,
1115
+ "num_sink_tokens": null,
1116
+ "replace_with_linear": false,
1117
+ "sparsify": null,
1118
+ "unshifted_sink": false,
1119
+ "use_prefill_window_in_sink_attention": false,
1120
+ "window_length": null
1121
+ },
1122
+ "ffn": {
1123
+ "ffn_mult": 4.875,
1124
+ "no_op": false,
1125
+ "replace_with_linear": false,
1126
+ "sparsify": null
1127
+ }
1128
+ },
1129
+ {
1130
+ "attention": {
1131
+ "n_heads_in_group": 16,
1132
+ "no_op": false,
1133
+ "num_sink_tokens": null,
1134
+ "replace_with_linear": false,
1135
+ "sparsify": null,
1136
+ "unshifted_sink": false,
1137
+ "use_prefill_window_in_sink_attention": false,
1138
+ "window_length": null
1139
+ },
1140
+ "ffn": {
1141
+ "ffn_mult": 4.875,
1142
+ "no_op": false,
1143
+ "replace_with_linear": false,
1144
+ "sparsify": null
1145
+ }
1146
+ },
1147
+ {
1148
+ "attention": {
1149
+ "n_heads_in_group": null,
1150
+ "no_op": true,
1151
+ "num_sink_tokens": null,
1152
+ "replace_with_linear": false,
1153
+ "sparsify": null,
1154
+ "unshifted_sink": false,
1155
+ "use_prefill_window_in_sink_attention": false,
1156
+ "window_length": null
1157
+ },
1158
+ "ffn": {
1159
+ "ffn_mult": null,
1160
+ "no_op": true,
1161
+ "replace_with_linear": false,
1162
+ "sparsify": null
1163
+ }
1164
+ },
1165
+ {
1166
+ "attention": {
1167
+ "n_heads_in_group": null,
1168
+ "no_op": true,
1169
+ "num_sink_tokens": null,
1170
+ "replace_with_linear": false,
1171
+ "sparsify": null,
1172
+ "unshifted_sink": false,
1173
+ "use_prefill_window_in_sink_attention": false,
1174
+ "window_length": null
1175
+ },
1176
+ "ffn": {
1177
+ "ffn_mult": null,
1178
+ "no_op": true,
1179
+ "replace_with_linear": false,
1180
+ "sparsify": null
1181
+ }
1182
+ },
1183
+ {
1184
+ "attention": {
1185
+ "n_heads_in_group": null,
1186
+ "no_op": true,
1187
+ "num_sink_tokens": null,
1188
+ "replace_with_linear": false,
1189
+ "sparsify": null,
1190
+ "unshifted_sink": false,
1191
+ "use_prefill_window_in_sink_attention": false,
1192
+ "window_length": null
1193
+ },
1194
+ "ffn": {
1195
+ "ffn_mult": null,
1196
+ "no_op": true,
1197
+ "replace_with_linear": false,
1198
+ "sparsify": null
1199
+ }
1200
+ },
1201
+ {
1202
+ "attention": {
1203
+ "n_heads_in_group": null,
1204
+ "no_op": true,
1205
+ "num_sink_tokens": null,
1206
+ "replace_with_linear": false,
1207
+ "sparsify": null,
1208
+ "unshifted_sink": false,
1209
+ "use_prefill_window_in_sink_attention": false,
1210
+ "window_length": null
1211
+ },
1212
+ "ffn": {
1213
+ "ffn_mult": null,
1214
+ "no_op": true,
1215
+ "replace_with_linear": false,
1216
+ "sparsify": null
1217
+ }
1218
+ },
1219
+ {
1220
+ "attention": {
1221
+ "n_heads_in_group": null,
1222
+ "no_op": true,
1223
+ "num_sink_tokens": null,
1224
+ "replace_with_linear": false,
1225
+ "sparsify": null,
1226
+ "unshifted_sink": false,
1227
+ "use_prefill_window_in_sink_attention": false,
1228
+ "window_length": null
1229
+ },
1230
+ "ffn": {
1231
+ "ffn_mult": null,
1232
+ "no_op": true,
1233
+ "replace_with_linear": false,
1234
+ "sparsify": null
1235
+ }
1236
+ },
1237
+ {
1238
+ "attention": {
1239
+ "n_heads_in_group": 16,
1240
+ "no_op": false,
1241
+ "num_sink_tokens": null,
1242
+ "replace_with_linear": false,
1243
+ "sparsify": null,
1244
+ "unshifted_sink": false,
1245
+ "use_prefill_window_in_sink_attention": false,
1246
+ "window_length": null
1247
+ },
1248
+ "ffn": {
1249
+ "ffn_mult": 4.875,
1250
+ "no_op": false,
1251
+ "replace_with_linear": false,
1252
+ "sparsify": null
1253
+ }
1254
+ },
1255
+ {
1256
+ "attention": {
1257
+ "n_heads_in_group": 16,
1258
+ "no_op": false,
1259
+ "num_sink_tokens": null,
1260
+ "replace_with_linear": false,
1261
+ "sparsify": null,
1262
+ "unshifted_sink": false,
1263
+ "use_prefill_window_in_sink_attention": false,
1264
+ "window_length": null
1265
+ },
1266
+ "ffn": {
1267
+ "ffn_mult": 4.875,
1268
+ "no_op": false,
1269
+ "replace_with_linear": false,
1270
+ "sparsify": null
1271
+ }
1272
+ },
1273
+ {
1274
+ "attention": {
1275
+ "n_heads_in_group": 16,
1276
+ "no_op": false,
1277
+ "num_sink_tokens": null,
1278
+ "replace_with_linear": false,
1279
+ "sparsify": null,
1280
+ "unshifted_sink": false,
1281
+ "use_prefill_window_in_sink_attention": false,
1282
+ "window_length": null
1283
+ },
1284
+ "ffn": {
1285
+ "ffn_mult": 4.875,
1286
+ "no_op": false,
1287
+ "replace_with_linear": false,
1288
+ "sparsify": null
1289
+ }
1290
+ },
1291
+ {
1292
+ "attention": {
1293
+ "n_heads_in_group": 16,
1294
+ "no_op": false,
1295
+ "num_sink_tokens": null,
1296
+ "replace_with_linear": false,
1297
+ "sparsify": null,
1298
+ "unshifted_sink": false,
1299
+ "use_prefill_window_in_sink_attention": false,
1300
+ "window_length": null
1301
+ },
1302
+ "ffn": {
1303
+ "ffn_mult": 4.875,
1304
+ "no_op": false,
1305
+ "replace_with_linear": false,
1306
+ "sparsify": null
1307
+ }
1308
+ },
1309
+ {
1310
+ "attention": {
1311
+ "n_heads_in_group": null,
1312
+ "no_op": true,
1313
+ "num_sink_tokens": null,
1314
+ "replace_with_linear": false,
1315
+ "sparsify": null,
1316
+ "unshifted_sink": false,
1317
+ "use_prefill_window_in_sink_attention": false,
1318
+ "window_length": null
1319
+ },
1320
+ "ffn": {
1321
+ "ffn_mult": null,
1322
+ "no_op": true,
1323
+ "replace_with_linear": false,
1324
+ "sparsify": null
1325
+ }
1326
+ },
1327
+ {
1328
+ "attention": {
1329
+ "n_heads_in_group": null,
1330
+ "no_op": true,
1331
+ "num_sink_tokens": null,
1332
+ "replace_with_linear": false,
1333
+ "sparsify": null,
1334
+ "unshifted_sink": false,
1335
+ "use_prefill_window_in_sink_attention": false,
1336
+ "window_length": null
1337
+ },
1338
+ "ffn": {
1339
+ "ffn_mult": null,
1340
+ "no_op": true,
1341
+ "replace_with_linear": false,
1342
+ "sparsify": null
1343
+ }
1344
+ },
1345
+ {
1346
+ "attention": {
1347
+ "n_heads_in_group": null,
1348
+ "no_op": true,
1349
+ "num_sink_tokens": null,
1350
+ "replace_with_linear": false,
1351
+ "sparsify": null,
1352
+ "unshifted_sink": false,
1353
+ "use_prefill_window_in_sink_attention": false,
1354
+ "window_length": null
1355
+ },
1356
+ "ffn": {
1357
+ "ffn_mult": null,
1358
+ "no_op": true,
1359
+ "replace_with_linear": false,
1360
+ "sparsify": null
1361
+ }
1362
+ },
1363
+ {
1364
+ "attention": {
1365
+ "n_heads_in_group": null,
1366
+ "no_op": true,
1367
+ "num_sink_tokens": null,
1368
+ "replace_with_linear": false,
1369
+ "sparsify": null,
1370
+ "unshifted_sink": false,
1371
+ "use_prefill_window_in_sink_attention": false,
1372
+ "window_length": null
1373
+ },
1374
+ "ffn": {
1375
+ "ffn_mult": null,
1376
+ "no_op": true,
1377
+ "replace_with_linear": false,
1378
+ "sparsify": null
1379
+ }
1380
+ },
1381
+ {
1382
+ "attention": {
1383
+ "n_heads_in_group": null,
1384
+ "no_op": true,
1385
+ "num_sink_tokens": null,
1386
+ "replace_with_linear": false,
1387
+ "sparsify": null,
1388
+ "unshifted_sink": false,
1389
+ "use_prefill_window_in_sink_attention": false,
1390
+ "window_length": null
1391
+ },
1392
+ "ffn": {
1393
+ "ffn_mult": null,
1394
+ "no_op": true,
1395
+ "replace_with_linear": false,
1396
+ "sparsify": null
1397
+ }
1398
+ },
1399
+ {
1400
+ "attention": {
1401
+ "n_heads_in_group": 16,
1402
+ "no_op": false,
1403
+ "num_sink_tokens": null,
1404
+ "replace_with_linear": false,
1405
+ "sparsify": null,
1406
+ "unshifted_sink": false,
1407
+ "use_prefill_window_in_sink_attention": false,
1408
+ "window_length": null
1409
+ },
1410
+ "ffn": {
1411
+ "ffn_mult": 4.875,
1412
+ "no_op": false,
1413
+ "replace_with_linear": false,
1414
+ "sparsify": null
1415
+ }
1416
+ },
1417
+ {
1418
+ "attention": {
1419
+ "n_heads_in_group": 16,
1420
+ "no_op": false,
1421
+ "num_sink_tokens": null,
1422
+ "replace_with_linear": false,
1423
+ "sparsify": null,
1424
+ "unshifted_sink": false,
1425
+ "use_prefill_window_in_sink_attention": false,
1426
+ "window_length": null
1427
+ },
1428
+ "ffn": {
1429
+ "ffn_mult": 4.875,
1430
+ "no_op": false,
1431
+ "replace_with_linear": false,
1432
+ "sparsify": null
1433
+ }
1434
+ },
1435
+ {
1436
+ "attention": {
1437
+ "n_heads_in_group": 16,
1438
+ "no_op": false,
1439
+ "num_sink_tokens": null,
1440
+ "replace_with_linear": false,
1441
+ "sparsify": null,
1442
+ "unshifted_sink": false,
1443
+ "use_prefill_window_in_sink_attention": false,
1444
+ "window_length": null
1445
+ },
1446
+ "ffn": {
1447
+ "ffn_mult": 4.875,
1448
+ "no_op": false,
1449
+ "replace_with_linear": false,
1450
+ "sparsify": null
1451
+ }
1452
+ },
1453
+ {
1454
+ "attention": {
1455
+ "n_heads_in_group": 16,
1456
+ "no_op": false,
1457
+ "num_sink_tokens": null,
1458
+ "replace_with_linear": false,
1459
+ "sparsify": null,
1460
+ "unshifted_sink": false,
1461
+ "use_prefill_window_in_sink_attention": false,
1462
+ "window_length": null
1463
+ },
1464
+ "ffn": {
1465
+ "ffn_mult": 4.875,
1466
+ "no_op": false,
1467
+ "replace_with_linear": false,
1468
+ "sparsify": null
1469
+ }
1470
+ },
1471
+ {
1472
+ "attention": {
1473
+ "n_heads_in_group": null,
1474
+ "no_op": true,
1475
+ "num_sink_tokens": null,
1476
+ "replace_with_linear": false,
1477
+ "sparsify": null,
1478
+ "unshifted_sink": false,
1479
+ "use_prefill_window_in_sink_attention": false,
1480
+ "window_length": null
1481
+ },
1482
+ "ffn": {
1483
+ "ffn_mult": null,
1484
+ "no_op": true,
1485
+ "replace_with_linear": false,
1486
+ "sparsify": null
1487
+ }
1488
+ },
1489
+ {
1490
+ "attention": {
1491
+ "n_heads_in_group": null,
1492
+ "no_op": true,
1493
+ "num_sink_tokens": null,
1494
+ "replace_with_linear": false,
1495
+ "sparsify": null,
1496
+ "unshifted_sink": false,
1497
+ "use_prefill_window_in_sink_attention": false,
1498
+ "window_length": null
1499
+ },
1500
+ "ffn": {
1501
+ "ffn_mult": null,
1502
+ "no_op": true,
1503
+ "replace_with_linear": false,
1504
+ "sparsify": null
1505
+ }
1506
+ },
1507
+ {
1508
+ "attention": {
1509
+ "n_heads_in_group": null,
1510
+ "no_op": true,
1511
+ "num_sink_tokens": null,
1512
+ "replace_with_linear": false,
1513
+ "sparsify": null,
1514
+ "unshifted_sink": false,
1515
+ "use_prefill_window_in_sink_attention": false,
1516
+ "window_length": null
1517
+ },
1518
+ "ffn": {
1519
+ "ffn_mult": null,
1520
+ "no_op": true,
1521
+ "replace_with_linear": false,
1522
+ "sparsify": null
1523
+ }
1524
+ },
1525
+ {
1526
+ "attention": {
1527
+ "n_heads_in_group": null,
1528
+ "no_op": true,
1529
+ "num_sink_tokens": null,
1530
+ "replace_with_linear": false,
1531
+ "sparsify": null,
1532
+ "unshifted_sink": false,
1533
+ "use_prefill_window_in_sink_attention": false,
1534
+ "window_length": null
1535
+ },
1536
+ "ffn": {
1537
+ "ffn_mult": null,
1538
+ "no_op": true,
1539
+ "replace_with_linear": false,
1540
+ "sparsify": null
1541
+ }
1542
+ },
1543
+ {
1544
+ "attention": {
1545
+ "n_heads_in_group": null,
1546
+ "no_op": true,
1547
+ "num_sink_tokens": null,
1548
+ "replace_with_linear": false,
1549
+ "sparsify": null,
1550
+ "unshifted_sink": false,
1551
+ "use_prefill_window_in_sink_attention": false,
1552
+ "window_length": null
1553
+ },
1554
+ "ffn": {
1555
+ "ffn_mult": null,
1556
+ "no_op": true,
1557
+ "replace_with_linear": false,
1558
+ "sparsify": null
1559
+ }
1560
+ },
1561
+ {
1562
+ "attention": {
1563
+ "n_heads_in_group": 16,
1564
+ "no_op": false,
1565
+ "num_sink_tokens": null,
1566
+ "replace_with_linear": false,
1567
+ "sparsify": null,
1568
+ "unshifted_sink": false,
1569
+ "use_prefill_window_in_sink_attention": false,
1570
+ "window_length": null
1571
+ },
1572
+ "ffn": {
1573
+ "ffn_mult": 4.875,
1574
+ "no_op": false,
1575
+ "replace_with_linear": false,
1576
+ "sparsify": null
1577
+ }
1578
+ },
1579
+ {
1580
+ "attention": {
1581
+ "n_heads_in_group": 16,
1582
+ "no_op": false,
1583
+ "num_sink_tokens": null,
1584
+ "replace_with_linear": false,
1585
+ "sparsify": null,
1586
+ "unshifted_sink": false,
1587
+ "use_prefill_window_in_sink_attention": false,
1588
+ "window_length": null
1589
+ },
1590
+ "ffn": {
1591
+ "ffn_mult": 3.4125,
1592
+ "no_op": false,
1593
+ "replace_with_linear": false,
1594
+ "sparsify": null
1595
+ }
1596
+ },
1597
+ {
1598
+ "attention": {
1599
+ "n_heads_in_group": 16,
1600
+ "no_op": false,
1601
+ "num_sink_tokens": null,
1602
+ "replace_with_linear": false,
1603
+ "sparsify": null,
1604
+ "unshifted_sink": false,
1605
+ "use_prefill_window_in_sink_attention": false,
1606
+ "window_length": null
1607
+ },
1608
+ "ffn": {
1609
+ "ffn_mult": 3.4125,
1610
+ "no_op": false,
1611
+ "replace_with_linear": false,
1612
+ "sparsify": null
1613
+ }
1614
+ },
1615
+ {
1616
+ "attention": {
1617
+ "n_heads_in_group": 16,
1618
+ "no_op": false,
1619
+ "num_sink_tokens": null,
1620
+ "replace_with_linear": false,
1621
+ "sparsify": null,
1622
+ "unshifted_sink": false,
1623
+ "use_prefill_window_in_sink_attention": false,
1624
+ "window_length": null
1625
+ },
1626
+ "ffn": {
1627
+ "ffn_mult": 3.4125,
1628
+ "no_op": false,
1629
+ "replace_with_linear": false,
1630
+ "sparsify": null
1631
+ }
1632
+ },
1633
+ {
1634
+ "attention": {
1635
+ "n_heads_in_group": null,
1636
+ "no_op": true,
1637
+ "num_sink_tokens": null,
1638
+ "replace_with_linear": false,
1639
+ "sparsify": null,
1640
+ "unshifted_sink": false,
1641
+ "use_prefill_window_in_sink_attention": false,
1642
+ "window_length": null
1643
+ },
1644
+ "ffn": {
1645
+ "ffn_mult": null,
1646
+ "no_op": true,
1647
+ "replace_with_linear": false,
1648
+ "sparsify": null
1649
+ }
1650
+ },
1651
+ {
1652
+ "attention": {
1653
+ "n_heads_in_group": null,
1654
+ "no_op": true,
1655
+ "num_sink_tokens": null,
1656
+ "replace_with_linear": false,
1657
+ "sparsify": null,
1658
+ "unshifted_sink": false,
1659
+ "use_prefill_window_in_sink_attention": false,
1660
+ "window_length": null
1661
+ },
1662
+ "ffn": {
1663
+ "ffn_mult": null,
1664
+ "no_op": true,
1665
+ "replace_with_linear": false,
1666
+ "sparsify": null
1667
+ }
1668
+ },
1669
+ {
1670
+ "attention": {
1671
+ "n_heads_in_group": 16,
1672
+ "no_op": false,
1673
+ "num_sink_tokens": null,
1674
+ "replace_with_linear": false,
1675
+ "sparsify": null,
1676
+ "unshifted_sink": false,
1677
+ "use_prefill_window_in_sink_attention": false,
1678
+ "window_length": null
1679
+ },
1680
+ "ffn": {
1681
+ "ffn_mult": 2.925,
1682
+ "no_op": false,
1683
+ "replace_with_linear": false,
1684
+ "sparsify": null
1685
+ }
1686
+ },
1687
+ {
1688
+ "attention": {
1689
+ "n_heads_in_group": 16,
1690
+ "no_op": false,
1691
+ "num_sink_tokens": null,
1692
+ "replace_with_linear": false,
1693
+ "sparsify": null,
1694
+ "unshifted_sink": false,
1695
+ "use_prefill_window_in_sink_attention": false,
1696
+ "window_length": null
1697
+ },
1698
+ "ffn": {
1699
+ "ffn_mult": 2.4375,
1700
+ "no_op": false,
1701
+ "replace_with_linear": false,
1702
+ "sparsify": null
1703
+ }
1704
+ },
1705
+ {
1706
+ "attention": {
1707
+ "n_heads_in_group": 16,
1708
+ "no_op": false,
1709
+ "num_sink_tokens": null,
1710
+ "replace_with_linear": false,
1711
+ "sparsify": null,
1712
+ "unshifted_sink": false,
1713
+ "use_prefill_window_in_sink_attention": false,
1714
+ "window_length": null
1715
+ },
1716
+ "ffn": {
1717
+ "ffn_mult": 2.4375,
1718
+ "no_op": false,
1719
+ "replace_with_linear": false,
1720
+ "sparsify": null
1721
+ }
1722
+ },
1723
+ {
1724
+ "attention": {
1725
+ "n_heads_in_group": 16,
1726
+ "no_op": false,
1727
+ "num_sink_tokens": null,
1728
+ "replace_with_linear": false,
1729
+ "sparsify": null,
1730
+ "unshifted_sink": false,
1731
+ "use_prefill_window_in_sink_attention": false,
1732
+ "window_length": null
1733
+ },
1734
+ "ffn": {
1735
+ "ffn_mult": 2.4375,
1736
+ "no_op": false,
1737
+ "replace_with_linear": false,
1738
+ "sparsify": null
1739
+ }
1740
+ },
1741
+ {
1742
+ "attention": {
1743
+ "n_heads_in_group": null,
1744
+ "no_op": true,
1745
+ "num_sink_tokens": null,
1746
+ "replace_with_linear": false,
1747
+ "sparsify": null,
1748
+ "unshifted_sink": false,
1749
+ "use_prefill_window_in_sink_attention": false,
1750
+ "window_length": null
1751
+ },
1752
+ "ffn": {
1753
+ "ffn_mult": 2.4375,
1754
+ "no_op": false,
1755
+ "replace_with_linear": false,
1756
+ "sparsify": null
1757
+ }
1758
+ },
1759
+ {
1760
+ "attention": {
1761
+ "n_heads_in_group": null,
1762
+ "no_op": true,
1763
+ "num_sink_tokens": null,
1764
+ "replace_with_linear": false,
1765
+ "sparsify": null,
1766
+ "unshifted_sink": false,
1767
+ "use_prefill_window_in_sink_attention": false,
1768
+ "window_length": null
1769
+ },
1770
+ "ffn": {
1771
+ "ffn_mult": 2.4375,
1772
+ "no_op": false,
1773
+ "replace_with_linear": false,
1774
+ "sparsify": null
1775
+ }
1776
+ },
1777
+ {
1778
+ "attention": {
1779
+ "n_heads_in_group": 16,
1780
+ "no_op": false,
1781
+ "num_sink_tokens": null,
1782
+ "replace_with_linear": false,
1783
+ "sparsify": null,
1784
+ "unshifted_sink": false,
1785
+ "use_prefill_window_in_sink_attention": false,
1786
+ "window_length": null
1787
+ },
1788
+ "ffn": {
1789
+ "ffn_mult": 2.4375,
1790
+ "no_op": false,
1791
+ "replace_with_linear": false,
1792
+ "sparsify": null
1793
+ }
1794
+ },
1795
+ {
1796
+ "attention": {
1797
+ "n_heads_in_group": null,
1798
+ "no_op": true,
1799
+ "num_sink_tokens": null,
1800
+ "replace_with_linear": false,
1801
+ "sparsify": null,
1802
+ "unshifted_sink": false,
1803
+ "use_prefill_window_in_sink_attention": false,
1804
+ "window_length": null
1805
+ },
1806
+ "ffn": {
1807
+ "ffn_mult": null,
1808
+ "no_op": true,
1809
+ "replace_with_linear": false,
1810
+ "sparsify": null
1811
+ }
1812
+ },
1813
+ {
1814
+ "attention": {
1815
+ "n_heads_in_group": null,
1816
+ "no_op": true,
1817
+ "num_sink_tokens": null,
1818
+ "replace_with_linear": false,
1819
+ "sparsify": null,
1820
+ "unshifted_sink": false,
1821
+ "use_prefill_window_in_sink_attention": false,
1822
+ "window_length": null
1823
+ },
1824
+ "ffn": {
1825
+ "ffn_mult": 2.4375,
1826
+ "no_op": false,
1827
+ "replace_with_linear": false,
1828
+ "sparsify": null
1829
+ }
1830
+ },
1831
+ {
1832
+ "attention": {
1833
+ "n_heads_in_group": null,
1834
+ "no_op": true,
1835
+ "num_sink_tokens": null,
1836
+ "replace_with_linear": false,
1837
+ "sparsify": null,
1838
+ "unshifted_sink": false,
1839
+ "use_prefill_window_in_sink_attention": false,
1840
+ "window_length": null
1841
+ },
1842
+ "ffn": {
1843
+ "ffn_mult": 2.4375,
1844
+ "no_op": false,
1845
+ "replace_with_linear": false,
1846
+ "sparsify": null
1847
+ }
1848
+ },
1849
+ {
1850
+ "attention": {
1851
+ "n_heads_in_group": null,
1852
+ "no_op": true,
1853
+ "num_sink_tokens": null,
1854
+ "replace_with_linear": false,
1855
+ "sparsify": null,
1856
+ "unshifted_sink": false,
1857
+ "use_prefill_window_in_sink_attention": false,
1858
+ "window_length": null
1859
+ },
1860
+ "ffn": {
1861
+ "ffn_mult": 2.4375,
1862
+ "no_op": false,
1863
+ "replace_with_linear": false,
1864
+ "sparsify": null
1865
+ }
1866
+ },
1867
+ {
1868
+ "attention": {
1869
+ "n_heads_in_group": null,
1870
+ "no_op": true,
1871
+ "num_sink_tokens": null,
1872
+ "replace_with_linear": false,
1873
+ "sparsify": null,
1874
+ "unshifted_sink": false,
1875
+ "use_prefill_window_in_sink_attention": false,
1876
+ "window_length": null
1877
+ },
1878
+ "ffn": {
1879
+ "ffn_mult": 2.4375,
1880
+ "no_op": false,
1881
+ "replace_with_linear": false,
1882
+ "sparsify": null
1883
+ }
1884
+ },
1885
+ {
1886
+ "attention": {
1887
+ "n_heads_in_group": null,
1888
+ "no_op": true,
1889
+ "num_sink_tokens": null,
1890
+ "replace_with_linear": false,
1891
+ "sparsify": null,
1892
+ "unshifted_sink": false,
1893
+ "use_prefill_window_in_sink_attention": false,
1894
+ "window_length": null
1895
+ },
1896
+ "ffn": {
1897
+ "ffn_mult": 2.4375,
1898
+ "no_op": false,
1899
+ "replace_with_linear": false,
1900
+ "sparsify": null
1901
+ }
1902
+ },
1903
+ {
1904
+ "attention": {
1905
+ "n_heads_in_group": null,
1906
+ "no_op": true,
1907
+ "num_sink_tokens": null,
1908
+ "replace_with_linear": false,
1909
+ "sparsify": null,
1910
+ "unshifted_sink": false,
1911
+ "use_prefill_window_in_sink_attention": false,
1912
+ "window_length": null
1913
+ },
1914
+ "ffn": {
1915
+ "ffn_mult": 2.4375,
1916
+ "no_op": false,
1917
+ "replace_with_linear": false,
1918
+ "sparsify": null
1919
+ }
1920
+ },
1921
+ {
1922
+ "attention": {
1923
+ "n_heads_in_group": null,
1924
+ "no_op": true,
1925
+ "num_sink_tokens": null,
1926
+ "replace_with_linear": false,
1927
+ "sparsify": null,
1928
+ "unshifted_sink": false,
1929
+ "use_prefill_window_in_sink_attention": false,
1930
+ "window_length": null
1931
+ },
1932
+ "ffn": {
1933
+ "ffn_mult": 2.4375,
1934
+ "no_op": false,
1935
+ "replace_with_linear": false,
1936
+ "sparsify": null
1937
+ }
1938
+ },
1939
+ {
1940
+ "attention": {
1941
+ "n_heads_in_group": 16,
1942
+ "no_op": false,
1943
+ "num_sink_tokens": null,
1944
+ "replace_with_linear": false,
1945
+ "sparsify": null,
1946
+ "unshifted_sink": false,
1947
+ "use_prefill_window_in_sink_attention": false,
1948
+ "window_length": null
1949
+ },
1950
+ "ffn": {
1951
+ "ffn_mult": 2.4375,
1952
+ "no_op": false,
1953
+ "replace_with_linear": false,
1954
+ "sparsify": null
1955
+ }
1956
+ },
1957
+ {
1958
+ "attention": {
1959
+ "n_heads_in_group": null,
1960
+ "no_op": true,
1961
+ "num_sink_tokens": null,
1962
+ "replace_with_linear": false,
1963
+ "sparsify": null,
1964
+ "unshifted_sink": false,
1965
+ "use_prefill_window_in_sink_attention": false,
1966
+ "window_length": null
1967
+ },
1968
+ "ffn": {
1969
+ "ffn_mult": null,
1970
+ "no_op": true,
1971
+ "replace_with_linear": false,
1972
+ "sparsify": null
1973
+ }
1974
+ },
1975
+ {
1976
+ "attention": {
1977
+ "n_heads_in_group": null,
1978
+ "no_op": true,
1979
+ "num_sink_tokens": null,
1980
+ "replace_with_linear": false,
1981
+ "sparsify": null,
1982
+ "unshifted_sink": false,
1983
+ "use_prefill_window_in_sink_attention": false,
1984
+ "window_length": null
1985
+ },
1986
+ "ffn": {
1987
+ "ffn_mult": null,
1988
+ "no_op": true,
1989
+ "replace_with_linear": false,
1990
+ "sparsify": null
1991
+ }
1992
+ },
1993
+ {
1994
+ "attention": {
1995
+ "n_heads_in_group": null,
1996
+ "no_op": true,
1997
+ "num_sink_tokens": null,
1998
+ "replace_with_linear": false,
1999
+ "sparsify": null,
2000
+ "unshifted_sink": false,
2001
+ "use_prefill_window_in_sink_attention": false,
2002
+ "window_length": null
2003
+ },
2004
+ "ffn": {
2005
+ "ffn_mult": null,
2006
+ "no_op": true,
2007
+ "replace_with_linear": false,
2008
+ "sparsify": null
2009
+ }
2010
+ },
2011
+ {
2012
+ "attention": {
2013
+ "n_heads_in_group": null,
2014
+ "no_op": true,
2015
+ "num_sink_tokens": null,
2016
+ "replace_with_linear": false,
2017
+ "sparsify": null,
2018
+ "unshifted_sink": false,
2019
+ "use_prefill_window_in_sink_attention": false,
2020
+ "window_length": null
2021
+ },
2022
+ "ffn": {
2023
+ "ffn_mult": null,
2024
+ "no_op": true,
2025
+ "replace_with_linear": false,
2026
+ "sparsify": null
2027
+ }
2028
+ },
2029
+ {
2030
+ "attention": {
2031
+ "n_heads_in_group": null,
2032
+ "no_op": true,
2033
+ "num_sink_tokens": null,
2034
+ "replace_with_linear": false,
2035
+ "sparsify": null,
2036
+ "unshifted_sink": false,
2037
+ "use_prefill_window_in_sink_attention": false,
2038
+ "window_length": null
2039
+ },
2040
+ "ffn": {
2041
+ "ffn_mult": null,
2042
+ "no_op": true,
2043
+ "replace_with_linear": false,
2044
+ "sparsify": null
2045
+ }
2046
+ },
2047
+ {
2048
+ "attention": {
2049
+ "n_heads_in_group": 16,
2050
+ "no_op": false,
2051
+ "num_sink_tokens": null,
2052
+ "replace_with_linear": false,
2053
+ "sparsify": null,
2054
+ "unshifted_sink": false,
2055
+ "use_prefill_window_in_sink_attention": false,
2056
+ "window_length": null
2057
+ },
2058
+ "ffn": {
2059
+ "ffn_mult": 2.925,
2060
+ "no_op": false,
2061
+ "replace_with_linear": false,
2062
+ "sparsify": null
2063
+ }
2064
+ },
2065
+ {
2066
+ "attention": {
2067
+ "n_heads_in_group": 16,
2068
+ "no_op": false,
2069
+ "num_sink_tokens": null,
2070
+ "replace_with_linear": false,
2071
+ "sparsify": null,
2072
+ "unshifted_sink": false,
2073
+ "use_prefill_window_in_sink_attention": false,
2074
+ "window_length": null
2075
+ },
2076
+ "ffn": {
2077
+ "ffn_mult": 4.875,
2078
+ "no_op": false,
2079
+ "replace_with_linear": false,
2080
+ "sparsify": null
2081
+ }
2082
+ },
2083
+ {
2084
+ "attention": {
2085
+ "n_heads_in_group": null,
2086
+ "no_op": true,
2087
+ "num_sink_tokens": null,
2088
+ "replace_with_linear": false,
2089
+ "sparsify": null,
2090
+ "unshifted_sink": false,
2091
+ "use_prefill_window_in_sink_attention": false,
2092
+ "window_length": null
2093
+ },
2094
+ "ffn": {
2095
+ "ffn_mult": 4.875,
2096
+ "no_op": false,
2097
+ "replace_with_linear": false,
2098
+ "sparsify": null
2099
+ }
2100
+ },
2101
+ {
2102
+ "attention": {
2103
+ "n_heads_in_group": 16,
2104
+ "no_op": false,
2105
+ "num_sink_tokens": null,
2106
+ "replace_with_linear": false,
2107
+ "sparsify": null,
2108
+ "unshifted_sink": false,
2109
+ "use_prefill_window_in_sink_attention": false,
2110
+ "window_length": null
2111
+ },
2112
+ "ffn": {
2113
+ "ffn_mult": 4.875,
2114
+ "no_op": false,
2115
+ "replace_with_linear": false,
2116
+ "sparsify": null
2117
+ }
2118
+ },
2119
+ {
2120
+ "attention": {
2121
+ "n_heads_in_group": null,
2122
+ "no_op": true,
2123
+ "num_sink_tokens": null,
2124
+ "replace_with_linear": false,
2125
+ "sparsify": null,
2126
+ "unshifted_sink": false,
2127
+ "use_prefill_window_in_sink_attention": false,
2128
+ "window_length": null
2129
+ },
2130
+ "ffn": {
2131
+ "ffn_mult": null,
2132
+ "no_op": true,
2133
+ "replace_with_linear": false,
2134
+ "sparsify": null
2135
+ }
2136
+ },
2137
+ {
2138
+ "attention": {
2139
+ "n_heads_in_group": null,
2140
+ "no_op": true,
2141
+ "num_sink_tokens": null,
2142
+ "replace_with_linear": false,
2143
+ "sparsify": null,
2144
+ "unshifted_sink": false,
2145
+ "use_prefill_window_in_sink_attention": false,
2146
+ "window_length": null
2147
+ },
2148
+ "ffn": {
2149
+ "ffn_mult": null,
2150
+ "no_op": true,
2151
+ "replace_with_linear": false,
2152
+ "sparsify": null
2153
+ }
2154
+ },
2155
+ {
2156
+ "attention": {
2157
+ "n_heads_in_group": null,
2158
+ "no_op": true,
2159
+ "num_sink_tokens": null,
2160
+ "replace_with_linear": false,
2161
+ "sparsify": null,
2162
+ "unshifted_sink": false,
2163
+ "use_prefill_window_in_sink_attention": false,
2164
+ "window_length": null
2165
+ },
2166
+ "ffn": {
2167
+ "ffn_mult": null,
2168
+ "no_op": true,
2169
+ "replace_with_linear": false,
2170
+ "sparsify": null
2171
+ }
2172
+ },
2173
+ {
2174
+ "attention": {
2175
+ "n_heads_in_group": null,
2176
+ "no_op": true,
2177
+ "num_sink_tokens": null,
2178
+ "replace_with_linear": false,
2179
+ "sparsify": null,
2180
+ "unshifted_sink": false,
2181
+ "use_prefill_window_in_sink_attention": false,
2182
+ "window_length": null
2183
+ },
2184
+ "ffn": {
2185
+ "ffn_mult": null,
2186
+ "no_op": true,
2187
+ "replace_with_linear": false,
2188
+ "sparsify": null
2189
+ }
2190
+ },
2191
+ {
2192
+ "attention": {
2193
+ "n_heads_in_group": null,
2194
+ "no_op": true,
2195
+ "num_sink_tokens": null,
2196
+ "replace_with_linear": false,
2197
+ "sparsify": null,
2198
+ "unshifted_sink": false,
2199
+ "use_prefill_window_in_sink_attention": false,
2200
+ "window_length": null
2201
+ },
2202
+ "ffn": {
2203
+ "ffn_mult": null,
2204
+ "no_op": true,
2205
+ "replace_with_linear": false,
2206
+ "sparsify": null
2207
+ }
2208
+ },
2209
+ {
2210
+ "attention": {
2211
+ "n_heads_in_group": null,
2212
+ "no_op": true,
2213
+ "num_sink_tokens": null,
2214
+ "replace_with_linear": false,
2215
+ "sparsify": null,
2216
+ "unshifted_sink": false,
2217
+ "use_prefill_window_in_sink_attention": false,
2218
+ "window_length": null
2219
+ },
2220
+ "ffn": {
2221
+ "ffn_mult": null,
2222
+ "no_op": true,
2223
+ "replace_with_linear": false,
2224
+ "sparsify": null
2225
+ }
2226
+ },
2227
+ {
2228
+ "attention": {
2229
+ "n_heads_in_group": null,
2230
+ "no_op": true,
2231
+ "num_sink_tokens": null,
2232
+ "replace_with_linear": false,
2233
+ "sparsify": null,
2234
+ "unshifted_sink": false,
2235
+ "use_prefill_window_in_sink_attention": false,
2236
+ "window_length": null
2237
+ },
2238
+ "ffn": {
2239
+ "ffn_mult": null,
2240
+ "no_op": true,
2241
+ "replace_with_linear": false,
2242
+ "sparsify": null
2243
+ }
2244
+ },
2245
+ {
2246
+ "attention": {
2247
+ "n_heads_in_group": null,
2248
+ "no_op": true,
2249
+ "num_sink_tokens": null,
2250
+ "replace_with_linear": false,
2251
+ "sparsify": null,
2252
+ "unshifted_sink": false,
2253
+ "use_prefill_window_in_sink_attention": false,
2254
+ "window_length": null
2255
+ },
2256
+ "ffn": {
2257
+ "ffn_mult": null,
2258
+ "no_op": true,
2259
+ "replace_with_linear": false,
2260
+ "sparsify": null
2261
+ }
2262
+ },
2263
+ {
2264
+ "attention": {
2265
+ "n_heads_in_group": null,
2266
+ "no_op": true,
2267
+ "num_sink_tokens": null,
2268
+ "replace_with_linear": false,
2269
+ "sparsify": null,
2270
+ "unshifted_sink": false,
2271
+ "use_prefill_window_in_sink_attention": false,
2272
+ "window_length": null
2273
+ },
2274
+ "ffn": {
2275
+ "ffn_mult": 36.5625,
2276
+ "no_op": false,
2277
+ "replace_with_linear": false,
2278
+ "sparsify": null
2279
+ }
2280
+ },
2281
+ {
2282
+ "attention": {
2283
+ "n_heads_in_group": null,
2284
+ "no_op": true,
2285
+ "num_sink_tokens": null,
2286
+ "replace_with_linear": false,
2287
+ "sparsify": null,
2288
+ "unshifted_sink": false,
2289
+ "use_prefill_window_in_sink_attention": false,
2290
+ "window_length": null
2291
+ },
2292
+ "ffn": {
2293
+ "ffn_mult": null,
2294
+ "no_op": true,
2295
+ "replace_with_linear": false,
2296
+ "sparsify": null
2297
+ }
2298
+ },
2299
+ {
2300
+ "attention": {
2301
+ "n_heads_in_group": null,
2302
+ "no_op": true,
2303
+ "num_sink_tokens": null,
2304
+ "replace_with_linear": false,
2305
+ "sparsify": null,
2306
+ "unshifted_sink": false,
2307
+ "use_prefill_window_in_sink_attention": false,
2308
+ "window_length": null
2309
+ },
2310
+ "ffn": {
2311
+ "ffn_mult": null,
2312
+ "no_op": true,
2313
+ "replace_with_linear": false,
2314
+ "sparsify": null
2315
+ }
2316
+ },
2317
+ {
2318
+ "attention": {
2319
+ "n_heads_in_group": null,
2320
+ "no_op": true,
2321
+ "num_sink_tokens": null,
2322
+ "replace_with_linear": false,
2323
+ "sparsify": null,
2324
+ "unshifted_sink": false,
2325
+ "use_prefill_window_in_sink_attention": false,
2326
+ "window_length": null
2327
+ },
2328
+ "ffn": {
2329
+ "ffn_mult": null,
2330
+ "no_op": true,
2331
+ "replace_with_linear": false,
2332
+ "sparsify": null
2333
+ }
2334
+ },
2335
+ {
2336
+ "attention": {
2337
+ "n_heads_in_group": null,
2338
+ "no_op": true,
2339
+ "num_sink_tokens": null,
2340
+ "replace_with_linear": false,
2341
+ "sparsify": null,
2342
+ "unshifted_sink": false,
2343
+ "use_prefill_window_in_sink_attention": false,
2344
+ "window_length": null
2345
+ },
2346
+ "ffn": {
2347
+ "ffn_mult": null,
2348
+ "no_op": true,
2349
+ "replace_with_linear": false,
2350
+ "sparsify": null
2351
+ }
2352
+ },
2353
+ {
2354
+ "attention": {
2355
+ "n_heads_in_group": null,
2356
+ "no_op": true,
2357
+ "num_sink_tokens": null,
2358
+ "replace_with_linear": false,
2359
+ "sparsify": null,
2360
+ "unshifted_sink": false,
2361
+ "use_prefill_window_in_sink_attention": false,
2362
+ "window_length": null
2363
+ },
2364
+ "ffn": {
2365
+ "ffn_mult": null,
2366
+ "no_op": true,
2367
+ "replace_with_linear": false,
2368
+ "sparsify": null
2369
+ }
2370
+ },
2371
+ {
2372
+ "attention": {
2373
+ "n_heads_in_group": null,
2374
+ "no_op": true,
2375
+ "num_sink_tokens": null,
2376
+ "replace_with_linear": false,
2377
+ "sparsify": null,
2378
+ "unshifted_sink": false,
2379
+ "use_prefill_window_in_sink_attention": false,
2380
+ "window_length": null
2381
+ },
2382
+ "ffn": {
2383
+ "ffn_mult": null,
2384
+ "no_op": true,
2385
+ "replace_with_linear": false,
2386
+ "sparsify": null
2387
+ }
2388
+ },
2389
+ {
2390
+ "attention": {
2391
+ "n_heads_in_group": null,
2392
+ "no_op": true,
2393
+ "num_sink_tokens": null,
2394
+ "replace_with_linear": false,
2395
+ "sparsify": null,
2396
+ "unshifted_sink": false,
2397
+ "use_prefill_window_in_sink_attention": false,
2398
+ "window_length": null
2399
+ },
2400
+ "ffn": {
2401
+ "ffn_mult": null,
2402
+ "no_op": true,
2403
+ "replace_with_linear": false,
2404
+ "sparsify": null
2405
+ }
2406
+ },
2407
+ {
2408
+ "attention": {
2409
+ "n_heads_in_group": null,
2410
+ "no_op": true,
2411
+ "num_sink_tokens": null,
2412
+ "replace_with_linear": false,
2413
+ "sparsify": null,
2414
+ "unshifted_sink": false,
2415
+ "use_prefill_window_in_sink_attention": false,
2416
+ "window_length": null
2417
+ },
2418
+ "ffn": {
2419
+ "ffn_mult": null,
2420
+ "no_op": true,
2421
+ "replace_with_linear": false,
2422
+ "sparsify": null
2423
+ }
2424
+ },
2425
+ {
2426
+ "attention": {
2427
+ "n_heads_in_group": null,
2428
+ "no_op": true,
2429
+ "num_sink_tokens": null,
2430
+ "replace_with_linear": false,
2431
+ "sparsify": null,
2432
+ "unshifted_sink": false,
2433
+ "use_prefill_window_in_sink_attention": false,
2434
+ "window_length": null
2435
+ },
2436
+ "ffn": {
2437
+ "ffn_mult": 39.0,
2438
+ "no_op": false,
2439
+ "replace_with_linear": false,
2440
+ "sparsify": null
2441
+ }
2442
+ },
2443
+ {
2444
+ "attention": {
2445
+ "n_heads_in_group": null,
2446
+ "no_op": true,
2447
+ "num_sink_tokens": null,
2448
+ "replace_with_linear": false,
2449
+ "sparsify": null,
2450
+ "unshifted_sink": false,
2451
+ "use_prefill_window_in_sink_attention": false,
2452
+ "window_length": null
2453
+ },
2454
+ "ffn": {
2455
+ "ffn_mult": null,
2456
+ "no_op": true,
2457
+ "replace_with_linear": false,
2458
+ "sparsify": null
2459
+ }
2460
+ },
2461
+ {
2462
+ "attention": {
2463
+ "n_heads_in_group": null,
2464
+ "no_op": true,
2465
+ "num_sink_tokens": null,
2466
+ "replace_with_linear": false,
2467
+ "sparsify": null,
2468
+ "unshifted_sink": false,
2469
+ "use_prefill_window_in_sink_attention": false,
2470
+ "window_length": null
2471
+ },
2472
+ "ffn": {
2473
+ "ffn_mult": null,
2474
+ "no_op": true,
2475
+ "replace_with_linear": false,
2476
+ "sparsify": null
2477
+ }
2478
+ },
2479
+ {
2480
+ "attention": {
2481
+ "n_heads_in_group": null,
2482
+ "no_op": true,
2483
+ "num_sink_tokens": null,
2484
+ "replace_with_linear": false,
2485
+ "sparsify": null,
2486
+ "unshifted_sink": false,
2487
+ "use_prefill_window_in_sink_attention": false,
2488
+ "window_length": null
2489
+ },
2490
+ "ffn": {
2491
+ "ffn_mult": null,
2492
+ "no_op": true,
2493
+ "replace_with_linear": false,
2494
+ "sparsify": null
2495
+ }
2496
+ },
2497
+ {
2498
+ "attention": {
2499
+ "n_heads_in_group": null,
2500
+ "no_op": true,
2501
+ "num_sink_tokens": null,
2502
+ "replace_with_linear": false,
2503
+ "sparsify": null,
2504
+ "unshifted_sink": false,
2505
+ "use_prefill_window_in_sink_attention": false,
2506
+ "window_length": null
2507
+ },
2508
+ "ffn": {
2509
+ "ffn_mult": null,
2510
+ "no_op": true,
2511
+ "replace_with_linear": false,
2512
+ "sparsify": null
2513
+ }
2514
+ },
2515
+ {
2516
+ "attention": {
2517
+ "n_heads_in_group": null,
2518
+ "no_op": true,
2519
+ "num_sink_tokens": null,
2520
+ "replace_with_linear": false,
2521
+ "sparsify": null,
2522
+ "unshifted_sink": false,
2523
+ "use_prefill_window_in_sink_attention": false,
2524
+ "window_length": null
2525
+ },
2526
+ "ffn": {
2527
+ "ffn_mult": null,
2528
+ "no_op": true,
2529
+ "replace_with_linear": false,
2530
+ "sparsify": null
2531
+ }
2532
+ },
2533
+ {
2534
+ "attention": {
2535
+ "n_heads_in_group": null,
2536
+ "no_op": true,
2537
+ "num_sink_tokens": null,
2538
+ "replace_with_linear": false,
2539
+ "sparsify": null,
2540
+ "unshifted_sink": false,
2541
+ "use_prefill_window_in_sink_attention": false,
2542
+ "window_length": null
2543
+ },
2544
+ "ffn": {
2545
+ "ffn_mult": null,
2546
+ "no_op": true,
2547
+ "replace_with_linear": false,
2548
+ "sparsify": null
2549
+ }
2550
+ },
2551
+ {
2552
+ "attention": {
2553
+ "n_heads_in_group": null,
2554
+ "no_op": true,
2555
+ "num_sink_tokens": null,
2556
+ "replace_with_linear": false,
2557
+ "sparsify": null,
2558
+ "unshifted_sink": false,
2559
+ "use_prefill_window_in_sink_attention": false,
2560
+ "window_length": null
2561
+ },
2562
+ "ffn": {
2563
+ "ffn_mult": null,
2564
+ "no_op": true,
2565
+ "replace_with_linear": false,
2566
+ "sparsify": null
2567
+ }
2568
+ },
2569
+ {
2570
+ "attention": {
2571
+ "n_heads_in_group": null,
2572
+ "no_op": true,
2573
+ "num_sink_tokens": null,
2574
+ "replace_with_linear": false,
2575
+ "sparsify": null,
2576
+ "unshifted_sink": false,
2577
+ "use_prefill_window_in_sink_attention": false,
2578
+ "window_length": null
2579
+ },
2580
+ "ffn": {
2581
+ "ffn_mult": null,
2582
+ "no_op": true,
2583
+ "replace_with_linear": false,
2584
+ "sparsify": null
2585
+ }
2586
+ },
2587
+ {
2588
+ "attention": {
2589
+ "n_heads_in_group": null,
2590
+ "no_op": true,
2591
+ "num_sink_tokens": null,
2592
+ "replace_with_linear": false,
2593
+ "sparsify": null,
2594
+ "unshifted_sink": false,
2595
+ "use_prefill_window_in_sink_attention": false,
2596
+ "window_length": null
2597
+ },
2598
+ "ffn": {
2599
+ "ffn_mult": 31.40625,
2600
+ "no_op": false,
2601
+ "replace_with_linear": false,
2602
+ "sparsify": null
2603
+ }
2604
+ },
2605
+ {
2606
+ "attention": {
2607
+ "n_heads_in_group": null,
2608
+ "no_op": true,
2609
+ "num_sink_tokens": null,
2610
+ "replace_with_linear": false,
2611
+ "sparsify": null,
2612
+ "unshifted_sink": false,
2613
+ "use_prefill_window_in_sink_attention": false,
2614
+ "window_length": null
2615
+ },
2616
+ "ffn": {
2617
+ "ffn_mult": null,
2618
+ "no_op": true,
2619
+ "replace_with_linear": false,
2620
+ "sparsify": null
2621
+ }
2622
+ },
2623
+ {
2624
+ "attention": {
2625
+ "n_heads_in_group": null,
2626
+ "no_op": true,
2627
+ "num_sink_tokens": null,
2628
+ "replace_with_linear": false,
2629
+ "sparsify": null,
2630
+ "unshifted_sink": false,
2631
+ "use_prefill_window_in_sink_attention": false,
2632
+ "window_length": null
2633
+ },
2634
+ "ffn": {
2635
+ "ffn_mult": null,
2636
+ "no_op": true,
2637
+ "replace_with_linear": false,
2638
+ "sparsify": null
2639
+ }
2640
+ },
2641
+ {
2642
+ "attention": {
2643
+ "n_heads_in_group": null,
2644
+ "no_op": true,
2645
+ "num_sink_tokens": null,
2646
+ "replace_with_linear": false,
2647
+ "sparsify": null,
2648
+ "unshifted_sink": false,
2649
+ "use_prefill_window_in_sink_attention": false,
2650
+ "window_length": null
2651
+ },
2652
+ "ffn": {
2653
+ "ffn_mult": null,
2654
+ "no_op": true,
2655
+ "replace_with_linear": false,
2656
+ "sparsify": null
2657
+ }
2658
+ },
2659
+ {
2660
+ "attention": {
2661
+ "n_heads_in_group": null,
2662
+ "no_op": true,
2663
+ "num_sink_tokens": null,
2664
+ "replace_with_linear": false,
2665
+ "sparsify": null,
2666
+ "unshifted_sink": false,
2667
+ "use_prefill_window_in_sink_attention": false,
2668
+ "window_length": null
2669
+ },
2670
+ "ffn": {
2671
+ "ffn_mult": null,
2672
+ "no_op": true,
2673
+ "replace_with_linear": false,
2674
+ "sparsify": null
2675
+ }
2676
+ },
2677
+ {
2678
+ "attention": {
2679
+ "n_heads_in_group": null,
2680
+ "no_op": true,
2681
+ "num_sink_tokens": null,
2682
+ "replace_with_linear": false,
2683
+ "sparsify": null,
2684
+ "unshifted_sink": false,
2685
+ "use_prefill_window_in_sink_attention": false,
2686
+ "window_length": null
2687
+ },
2688
+ "ffn": {
2689
+ "ffn_mult": null,
2690
+ "no_op": true,
2691
+ "replace_with_linear": false,
2692
+ "sparsify": null
2693
+ }
2694
+ },
2695
+ {
2696
+ "attention": {
2697
+ "n_heads_in_group": null,
2698
+ "no_op": true,
2699
+ "num_sink_tokens": null,
2700
+ "replace_with_linear": false,
2701
+ "sparsify": null,
2702
+ "unshifted_sink": false,
2703
+ "use_prefill_window_in_sink_attention": false,
2704
+ "window_length": null
2705
+ },
2706
+ "ffn": {
2707
+ "ffn_mult": 27.5625,
2708
+ "no_op": false,
2709
+ "replace_with_linear": false,
2710
+ "sparsify": null
2711
+ }
2712
+ },
2713
+ {
2714
+ "attention": {
2715
+ "n_heads_in_group": null,
2716
+ "no_op": true,
2717
+ "num_sink_tokens": null,
2718
+ "replace_with_linear": false,
2719
+ "sparsify": null,
2720
+ "unshifted_sink": false,
2721
+ "use_prefill_window_in_sink_attention": false,
2722
+ "window_length": null
2723
+ },
2724
+ "ffn": {
2725
+ "ffn_mult": 1.95,
2726
+ "no_op": false,
2727
+ "replace_with_linear": false,
2728
+ "sparsify": null
2729
+ }
2730
+ },
2731
+ {
2732
+ "attention": {
2733
+ "n_heads_in_group": 16,
2734
+ "no_op": false,
2735
+ "num_sink_tokens": null,
2736
+ "replace_with_linear": false,
2737
+ "sparsify": null,
2738
+ "unshifted_sink": false,
2739
+ "use_prefill_window_in_sink_attention": false,
2740
+ "window_length": null
2741
+ },
2742
+ "ffn": {
2743
+ "ffn_mult": 1.95,
2744
+ "no_op": false,
2745
+ "replace_with_linear": false,
2746
+ "sparsify": null
2747
+ }
2748
+ },
2749
+ {
2750
+ "attention": {
2751
+ "n_heads_in_group": 16,
2752
+ "no_op": false,
2753
+ "num_sink_tokens": null,
2754
+ "replace_with_linear": false,
2755
+ "sparsify": null,
2756
+ "unshifted_sink": false,
2757
+ "use_prefill_window_in_sink_attention": false,
2758
+ "window_length": null
2759
+ },
2760
+ "ffn": {
2761
+ "ffn_mult": 2.4375,
2762
+ "no_op": false,
2763
+ "replace_with_linear": false,
2764
+ "sparsify": null
2765
+ }
2766
+ },
2767
+ {
2768
+ "attention": {
2769
+ "n_heads_in_group": null,
2770
+ "no_op": true,
2771
+ "num_sink_tokens": null,
2772
+ "replace_with_linear": false,
2773
+ "sparsify": null,
2774
+ "unshifted_sink": false,
2775
+ "use_prefill_window_in_sink_attention": false,
2776
+ "window_length": null
2777
+ },
2778
+ "ffn": {
2779
+ "ffn_mult": null,
2780
+ "no_op": true,
2781
+ "replace_with_linear": false,
2782
+ "sparsify": null
2783
+ }
2784
+ },
2785
+ {
2786
+ "attention": {
2787
+ "n_heads_in_group": 16,
2788
+ "no_op": false,
2789
+ "num_sink_tokens": null,
2790
+ "replace_with_linear": false,
2791
+ "sparsify": null,
2792
+ "unshifted_sink": false,
2793
+ "use_prefill_window_in_sink_attention": false,
2794
+ "window_length": null
2795
+ },
2796
+ "ffn": {
2797
+ "ffn_mult": 2.4375,
2798
+ "no_op": false,
2799
+ "replace_with_linear": false,
2800
+ "sparsify": null
2801
+ }
2802
+ },
2803
+ {
2804
+ "attention": {
2805
+ "n_heads_in_group": 16,
2806
+ "no_op": false,
2807
+ "num_sink_tokens": null,
2808
+ "replace_with_linear": false,
2809
+ "sparsify": null,
2810
+ "unshifted_sink": false,
2811
+ "use_prefill_window_in_sink_attention": false,
2812
+ "window_length": null
2813
+ },
2814
+ "ffn": {
2815
+ "ffn_mult": 2.4375,
2816
+ "no_op": false,
2817
+ "replace_with_linear": false,
2818
+ "sparsify": null
2819
+ }
2820
+ },
2821
+ {
2822
+ "attention": {
2823
+ "n_heads_in_group": 16,
2824
+ "no_op": false,
2825
+ "num_sink_tokens": null,
2826
+ "replace_with_linear": false,
2827
+ "sparsify": null,
2828
+ "unshifted_sink": false,
2829
+ "use_prefill_window_in_sink_attention": false,
2830
+ "window_length": null
2831
+ },
2832
+ "ffn": {
2833
+ "ffn_mult": 3.4125,
2834
+ "no_op": false,
2835
+ "replace_with_linear": false,
2836
+ "sparsify": null
2837
+ }
2838
+ },
2839
+ {
2840
+ "attention": {
2841
+ "n_heads_in_group": 16,
2842
+ "no_op": false,
2843
+ "num_sink_tokens": null,
2844
+ "replace_with_linear": false,
2845
+ "sparsify": null,
2846
+ "unshifted_sink": false,
2847
+ "use_prefill_window_in_sink_attention": false,
2848
+ "window_length": null
2849
+ },
2850
+ "ffn": {
2851
+ "ffn_mult": 4.875,
2852
+ "no_op": false,
2853
+ "replace_with_linear": false,
2854
+ "sparsify": null
2855
+ }
2856
+ },
2857
+ {
2858
+ "attention": {
2859
+ "n_heads_in_group": 16,
2860
+ "no_op": false,
2861
+ "num_sink_tokens": null,
2862
+ "replace_with_linear": false,
2863
+ "sparsify": null,
2864
+ "unshifted_sink": false,
2865
+ "use_prefill_window_in_sink_attention": false,
2866
+ "window_length": null
2867
+ },
2868
+ "ffn": {
2869
+ "ffn_mult": 4.875,
2870
+ "no_op": false,
2871
+ "replace_with_linear": false,
2872
+ "sparsify": null
2873
+ }
2874
+ },
2875
+ {
2876
+ "attention": {
2877
+ "n_heads_in_group": 16,
2878
+ "no_op": false,
2879
+ "num_sink_tokens": null,
2880
+ "replace_with_linear": false,
2881
+ "sparsify": null,
2882
+ "unshifted_sink": false,
2883
+ "use_prefill_window_in_sink_attention": false,
2884
+ "window_length": null
2885
+ },
2886
+ "ffn": {
2887
+ "ffn_mult": 4.875,
2888
+ "no_op": false,
2889
+ "replace_with_linear": false,
2890
+ "sparsify": null
2891
+ }
2892
+ },
2893
+ {
2894
+ "attention": {
2895
+ "n_heads_in_group": 16,
2896
+ "no_op": false,
2897
+ "num_sink_tokens": null,
2898
+ "replace_with_linear": false,
2899
+ "sparsify": null,
2900
+ "unshifted_sink": false,
2901
+ "use_prefill_window_in_sink_attention": false,
2902
+ "window_length": null
2903
+ },
2904
+ "ffn": {
2905
+ "ffn_mult": 4.875,
2906
+ "no_op": false,
2907
+ "replace_with_linear": false,
2908
+ "sparsify": null
2909
+ }
2910
+ },
2911
+ {
2912
+ "attention": {
2913
+ "n_heads_in_group": 16,
2914
+ "no_op": false,
2915
+ "num_sink_tokens": null,
2916
+ "replace_with_linear": false,
2917
+ "sparsify": null,
2918
+ "unshifted_sink": false,
2919
+ "use_prefill_window_in_sink_attention": false,
2920
+ "window_length": null
2921
+ },
2922
+ "ffn": {
2923
+ "ffn_mult": 2.4375,
2924
+ "no_op": false,
2925
+ "replace_with_linear": false,
2926
+ "sparsify": null
2927
+ }
2928
+ }
2929
+ ],
2930
+ "bos_token_id": 128000,
2931
+ "eos_token_id": [
2932
+ 128001,
2933
+ 128008,
2934
+ 128009
2935
+ ],
2936
+ "hidden_act": "silu",
2937
+ "hidden_size": 16384,
2938
+ "initializer_range": 0.02,
2939
+ "intermediate_size": null,
2940
+ "max_position_embeddings": 131072,
2941
+ "mlp_bias": false,
2942
+ "model_type": "nemotron-nas",
2943
+ "num_attention_heads": 128,
2944
+ "num_hidden_layers": 162,
2945
+ "num_key_value_heads": null,
2946
+ "pretraining_tp": 1,
2947
+ "rms_norm_eps": 1e-05,
2948
+ "rope_scaling": {
2949
+ "factor": 16.0,
2950
+ "high_freq_factor": 4.0,
2951
+ "low_freq_factor": 1.0,
2952
+ "original_max_position_embeddings": 8192,
2953
+ "rope_type": "llama3"
2954
+ },
2955
+ "rope_theta": 500000.0,
2956
+ "tie_word_embeddings": false,
2957
+ "torch_dtype": "bfloat16",
2958
+ "transformers_version": "4.45.1",
2959
+ "use_cache": true,
2960
+ "vocab_size": 128256,
2961
+ "quantization_config": {
2962
+ "quant_method": "exl3",
2963
+ "version": "0.0.1",
2964
+ "bits": 3.45,
2965
+ "head_bits": 6,
2966
+ "calibration": {
2967
+ "rows": 100,
2968
+ "cols": 2048
2969
+ },
2970
+ "out_scales": "auto"
2971
+ }
2972
+ }
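
The `block_configs` array above is what makes this a NAS-derived, heterogeneous architecture rather than a uniform Llama stack: each of the 162 blocks can independently drop its attention sub-block (`"no_op": true`), keep standard grouped attention (`"n_heads_in_group": 16`), and shrink or widen its FFN (values such as 1.95, 4.875 and 39.0 for `ffn_mult` appear above). The trailing `quantization_config` records an EXL3 quantization at roughly 3.45 bits per weight with 6-bit head layers. Below is a minimal sketch (not part of this repository) for summarizing the block layout from `config.json`; the intermediate-size formula is an assumption, mirroring the common LLaMA-style 2/3 scaling rounded up to a multiple of 256, while the authoritative mapping lives in the repository's own `block_config.py` / `modeling_decilm.py`.

```python
# Minimal sketch (not repository code): summarize the heterogeneous block layout.
# The intermediate-size formula below is an assumption (LLaMA-style 2/3 scaling,
# rounded up to a multiple of 256); see block_config.py for the authoritative one.
import json

def ffn_mult_to_intermediate_size(ffn_mult: float, hidden_size: int, multiple_of: int = 256) -> int:
    size = int(2 * ffn_mult * hidden_size / 3)
    return ((size + multiple_of - 1) // multiple_of) * multiple_of

with open("config.json") as f:
    cfg = json.load(f)

blocks = cfg["block_configs"]
attn_blocks = sum(not b["attention"]["no_op"] for b in blocks)
ffn_blocks = sum(not b["ffn"]["no_op"] for b in blocks)
print(f"{len(blocks)} blocks: {attn_blocks} with attention, {ffn_blocks} with FFN")
print("FFN width for ffn_mult=4.875:", ffn_mult_to_intermediate_size(4.875, cfg["hidden_size"]))
```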
configuration_decilm.py ADDED
@@ -0,0 +1,65 @@
# coding=utf-8
# Copyright 2024 Nvidia Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import dataclasses
import warnings
from typing import Dict, Any

from transformers.utils import is_flash_attn_2_available

from .block_config import BlockConfig
from .transformers_4_44_2__configuration_llama import LlamaConfig
from .transformers_4_44_2__modeling_rope_utils import \
    rope_config_validation  # fake import to make AutoConfig infer the dependency

rope_config_validation  # this line is here to make sure that auto-formatting doesn't remove the import


class DeciLMConfig(LlamaConfig):
    model_type = "nemotron-nas"

    def __init__(
            self,
            block_configs: list[dict] | list[BlockConfig] = None,
            **kwargs,
    ):
        attn_implementation = kwargs.pop("attn_implementation", None)
        if attn_implementation is None and is_flash_attn_2_available():
            attn_implementation = "flash_attention_2"

        if block_configs is not None:
            if isinstance(block_configs[0], dict):
                block_configs = [BlockConfig(**conf) for conf in block_configs]

            using_unshifted_sink = any([block_config.attention.unshifted_sink for block_config in block_configs])
            if using_unshifted_sink and attn_implementation != "eager":
                warnings.warn("Forcing attn_implementation='eager' since some attention layers use unshifted sink")
                attn_implementation = "eager"

        super().__init__(attn_implementation=attn_implementation, **kwargs)

        self.intermediate_size = None
        self.num_key_value_heads = None

        if block_configs is not None:
            assert len(block_configs) == self.num_hidden_layers

        self.block_configs: list[BlockConfig] = block_configs

    def to_dict(self) -> Dict[str, Any]:
        self_dict = super().to_dict()
        if self.block_configs is not None:
            self_dict["block_configs"] = [dataclasses.asdict(conf) for conf in self.block_configs]
        return self_dict
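
In practice this config class is resolved through the Transformers `trust_remote_code` path rather than imported directly. A hedged usage sketch follows; the repository id is a placeholder, and it assumes the usual `auto_map` wiring in `config.json` that points `AutoConfig` at `DeciLMConfig`.

```python
# Sketch only: load the custom DeciLMConfig through AutoConfig. The repo id is a
# placeholder; trust_remote_code is required because model_type "nemotron-nas"
# is not a built-in Transformers architecture.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("path/to/this/repo", trust_remote_code=True)
print(config.model_type)          # "nemotron-nas"
print(len(config.block_configs))  # one BlockConfig per hidden layer (162 here)
```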
model-00001-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f483721b59acce9b84aec2f5486b0da785cc1621023da50a266d0c24f457bd7d
3
+ size 8559470168
model-00002-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1588d1685d8481721700a5187f5739040dd2873a08c06c9e60109cc532c04b02
3
+ size 8298966336
model-00003-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c202cbac6af85611dc745ec2c46c41960a5bd84ecaccf6e2d0b6afe5b97ac5ef
3
+ size 8431117688
model-00004-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fa4c85ccc330867766193ae00b3bc98bce97dde5e59c66036c968035692c0d5
3
+ size 8436793520
model-00005-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:648e081717f543558b801cb00c16be8c3f531c2ab1da3596a38837be487f5aa5
3
+ size 8283675152
model-00006-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b0ee736386ab6f9c708d7579ae14d54530e06bc8bd06e902fbf53543f5a3ef1
3
+ size 8248023568
model-00007-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:744dea8ca02cab520726fc1c962ca44218df57e23f8a6ca9ddba92ed471816a0
3
+ size 8399791016
model-00008-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c737640754d4fed981a55f0558a4e6e355cc5f6130a3b8543e7335fa5f7c979
3
+ size 8428369552
model-00009-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d023790265c09ea0933f49f63641fbb6ae7750eda23c62c72358d0344afeada
3
+ size 2509331936
model-00010-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0588da9d0aa8f1a78326f1dae03ab549d3c45ac107e61e333398ef2662484d40
3
+ size 8182116528
model-00011-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c86b0ebfc3b44e1630aafaab4a56b472ba23c7a66f02f50406f3823f93c4d107
3
+ size 8727535792
model-00012-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc7d52fd86a39345cec7bff4773bc3ca659f527159da12b5f1eb08bfac6c1690
3
+ size 7730791584
model-00013-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:560edb02aae3ee25411d822ac3a7bd68dc34ae3f017d87d10cee00db2219eb43
3
+ size 8499282024
model-00014-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9230a93e538b29ba75bb5f989117f19318804242f5c2eb45289efa960ed96b70
3
+ size 8141340744
model-00015-of-00015.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d18464ca6fef580a9342f9d13f5ae1a0075864c1354422fb4e474199c6ee477
3
+ size 2411525368
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_decilm.py ADDED
@@ -0,0 +1,1681 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Nvidia Corporation, Google Inc, HuggingFace Inc, EleutherAI. All rights reserved.
3
+ #
4
+ # This code for Nvidia's model is based on the Llama modeling code by HuggingFace,
5
+ # which is in turn based on EleutherAI's GPT-NeoX library and the GPT-NeoX and
6
+ # OPT implementations in this library.
7
+ # Sliding window code based on Gemma2 by Google.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+
21
+ import math
22
+ from typing import List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.nn.functional as F
26
+ import torch.utils.checkpoint
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+ from transformers import GenerationConfig
30
+ from transformers.generation.utils import NEED_SETUP_CACHE_CLASSES_MAPPING, GenerationMixin, GenerateOutput
31
+ from transformers.modeling_utils import PreTrainedModel
32
+ from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
33
+ from transformers.utils import (
34
+ add_start_docstrings,
35
+ add_start_docstrings_to_model_forward,
36
+ is_flash_attn_greater_or_equal_2_10,
37
+ logging,
38
+ replace_return_docstrings,
39
+ )
40
+
41
+ from .block_config import AttentionConfig, FFNConfig
42
+ from .configuration_decilm import DeciLMConfig
43
+ from .transformers_4_44_2__activations import ACT2FN
44
+ from .transformers_4_44_2__cache_utils import Cache, StaticCache
45
+ from .transformers_4_44_2__modeling_attn_mask_utils import AttentionMaskConverter
46
+ from .transformers_4_44_2__modeling_flash_attention_utils_backward_compat import _flash_attention_forward
47
+ from .transformers_4_44_2__modeling_outputs import (
48
+ BaseModelOutputWithPast,
49
+ CausalLMOutputWithPast,
50
+ QuestionAnsweringModelOutput,
51
+ SequenceClassifierOutputWithPast,
52
+ TokenClassifierOutput,
53
+ )
54
+ from .transformers_4_44_2__modeling_rope_utils import ROPE_INIT_FUNCTIONS
55
+ from .transformers_4_44_2__pytorch_utils import ALL_LAYERNORM_LAYERS
56
+ from .variable_cache import VariableCache
57
+
58
+ MODEL_FOR_CAUSAL_LM_MAPPING_NAMES[DeciLMConfig.model_type] = "DeciLMForCausalLM"
59
+ logger = logging.get_logger(__name__)
60
+
61
+ _CONFIG_FOR_DOC = "DeciLMConfig"
62
+
63
+
64
+ def _prepare_4d_causal_attention_mask_with_cache_position(
65
+ attention_mask: torch.Tensor,
66
+ sequence_length: int,
67
+ target_length: int,
68
+ dtype: torch.dtype,
69
+ device: torch.device,
70
+ min_dtype: float,
71
+ cache_position: torch.Tensor,
72
+ batch_size: int,
73
+ ):
74
+ """
75
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
76
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
77
+
78
+ Args:
79
+ attention_mask (`torch.Tensor`):
80
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
81
+ sequence_length (`int`):
82
+ The sequence length being processed.
83
+ target_length (`int`):
84
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding (the part of the cache that is not filled yet).
85
+ dtype (`torch.dtype`):
86
+ The dtype to use for the 4D attention mask.
87
+ device (`torch.device`):
88
+ The device to place the 4D attention mask on.
89
+ min_dtype (`float`):
90
+ The minimum value representable with the dtype `dtype`.
91
+ cache_position (`torch.Tensor`):
92
+ Indices depicting the position of the input sequence tokens in the sequence.
93
+ batch_size (`int`):
94
+ Batch size.
95
+ """
96
+ if attention_mask is not None and attention_mask.dim() == 4:
97
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
98
+ causal_mask = attention_mask
99
+ else:
100
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
101
+ if sequence_length != 1:
102
+ causal_mask = torch.triu(causal_mask, diagonal=1)
103
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
104
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
105
+ if attention_mask is not None:
106
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
107
+ mask_length = attention_mask.shape[-1]
108
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
109
+ padding_mask = padding_mask == 0
110
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
111
+ padding_mask, min_dtype
112
+ )
113
+
114
+ return causal_mask
115
+
116
+
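As a small numeric illustration of the helper above (a sketch only; it assumes this module is importable so the function can be called directly):

```python
import torch
# from modeling_decilm import _prepare_4d_causal_attention_mask_with_cache_position

seq_len = target_len = 3
mask_2d = torch.ones(1, seq_len)                 # (batch_size, key_value_length), no padding
min_val = torch.finfo(torch.float32).min

mask_4d = _prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask=mask_2d,
    sequence_length=seq_len,
    target_length=target_len,
    dtype=torch.float32,
    device=torch.device("cpu"),
    min_dtype=min_val,
    cache_position=torch.arange(seq_len),
    batch_size=1,
)
# Shape (1, 1, 3, 3): 0.0 where a query token may attend, min_val at masked (future) positions.
assert mask_4d.shape == (1, 1, 3, 3)
```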
117
+ class DeciLMRMSNorm(nn.Module):
118
+ def __init__(self, hidden_size, eps=1e-6):
119
+ """
120
+ DeciLMRMSNorm is equivalent to T5LayerNorm
121
+ """
122
+ super().__init__()
123
+ self.weight = nn.Parameter(torch.ones(hidden_size))
124
+ self.variance_epsilon = eps
125
+
126
+ def forward(self, hidden_states):
127
+ input_dtype = hidden_states.dtype
128
+ hidden_states = hidden_states.to(torch.float32)
129
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
130
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
131
+ return self.weight * hidden_states.to(input_dtype)
132
+
133
+ def extra_repr(self):
134
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
135
+
136
+
137
+ ALL_LAYERNORM_LAYERS.append(DeciLMRMSNorm)
138
+
139
+
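As a quick sanity check of the normalization defined above (a minimal sketch; it assumes `DeciLMRMSNorm` is importable from this module):

```python
import torch

norm = DeciLMRMSNorm(hidden_size=8, eps=1e-6)
x = torch.randn(2, 5, 8)

# Manual RMSNorm: x / sqrt(mean(x^2) + eps), scaled by the learned weight (ones at init).
manual = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6) * norm.weight
assert torch.allclose(norm(x), manual, atol=1e-6)
```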
140
+ class DeciLMRotaryEmbedding(nn.Module):
141
+ def __init__(
142
+ self,
143
+ dim=None,
144
+ max_position_embeddings=2048,
145
+ base=10000,
146
+ device=None,
147
+ scaling_factor=1.0,
148
+ rope_type="default",
149
+ config: Optional[DeciLMConfig] = None,
150
+ ):
151
+ super().__init__()
152
+ # TODO (joao): remove the `if` below, only used for BC
153
+ self.rope_kwargs = {}
154
+ if config is None:
155
+ logger.warning_once(
156
+ "`DeciLMRotaryEmbedding` can now be fully parameterized by passing the model config through the "
157
+ "`config` argument. All other arguments will be removed in v4.45"
158
+ )
159
+ self.rope_kwargs = {
160
+ "rope_type": rope_type,
161
+ "factor": scaling_factor,
162
+ "dim": dim,
163
+ "base": base,
164
+ "max_position_embeddings": max_position_embeddings,
165
+ }
166
+ self.rope_type = rope_type
167
+ self.max_seq_len_cached = max_position_embeddings
168
+ self.original_max_seq_len = max_position_embeddings
169
+ else:
170
+ # BC: "rope_type" was originally "type"
171
+ if config.rope_scaling is not None:
172
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
173
+ else:
174
+ self.rope_type = "default"
175
+ self.max_seq_len_cached = config.max_position_embeddings
176
+ self.original_max_seq_len = config.max_position_embeddings
177
+
178
+ self.config = config
179
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
180
+
181
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs)
182
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
183
+ self.original_inv_freq = self.inv_freq
184
+
185
+ def _dynamic_frequency_update(self, position_ids, device):
186
+ """
187
+ dynamic RoPE layers should recompute `inv_freq` in the following situations:
188
+ 1 - growing beyond the cached sequence length (allow scaling)
189
+ 2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
190
+ """
191
+ seq_len = torch.max(position_ids) + 1
192
+ if seq_len > self.max_seq_len_cached: # growth
193
+ inv_freq, self.attention_scaling = self.rope_init_fn(
194
+ self.config, device, seq_len=seq_len, **self.rope_kwargs
195
+ )
196
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
197
+ self.max_seq_len_cached = seq_len
198
+
199
+ if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
200
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
201
+ self.max_seq_len_cached = self.original_max_seq_len
202
+
203
+ @torch.no_grad()
204
+ def forward(self, x, position_ids):
205
+ if "dynamic" in self.rope_type:
206
+ self._dynamic_frequency_update(position_ids, device=x.device)
207
+
208
+ # Core RoPE block
209
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
210
+ position_ids_expanded = position_ids[:, None, :].float()
211
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
212
+ device_type = x.device.type
213
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
214
+ with torch.autocast(device_type=device_type, enabled=False):
215
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
216
+ emb = torch.cat((freqs, freqs), dim=-1)
217
+ cos = emb.cos()
218
+ sin = emb.sin()
219
+
220
+ # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
221
+ cos = cos * self.attention_scaling
222
+ sin = sin * self.attention_scaling
223
+
224
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
225
+
226
+
227
+ class DeciLMLinearScalingRotaryEmbedding(DeciLMRotaryEmbedding):
228
+ """DeciLMRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
229
+
230
+ def __init__(self, *args, **kwargs):
231
+ logger.warning_once(
232
+ "`DeciLMLinearScalingRotaryEmbedding` is deprecated and will be removed in v4.45. Please use "
233
+ "`DeciLMRotaryEmbedding`, which now also does linear scaling (simply pass the model config to __init__)."
234
+ )
235
+ kwargs["rope_type"] = "linear"
236
+ super().__init__(*args, **kwargs)
237
+
238
+
239
+ class DeciLMDynamicNTKScalingRotaryEmbedding(DeciLMRotaryEmbedding):
240
+ """DeciLMRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
241
+
242
+ def __init__(self, *args, **kwargs):
243
+ logger.warning_once(
244
+ "`DeciLMDynamicNTKScalingRotaryEmbedding` is deprecated and will be removed in v4.45. Please use "
245
+ "`DeciLMRotaryEmbedding`, which now also does dynamic ntk scaling (simply pass the model config to "
246
+ "__init__)."
247
+ )
248
+ kwargs["rope_type"] = "dynamic"
249
+ super().__init__(*args, **kwargs)
250
+
251
+
252
+ def rotate_half(x):
253
+ """Rotates half the hidden dims of the input."""
254
+ x1 = x[..., : x.shape[-1] // 2]
255
+ x2 = x[..., x.shape[-1] // 2:]
256
+ return torch.cat((-x2, x1), dim=-1)
257
+
258
+
259
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
260
+ """Applies Rotary Position Embedding to the query and key tensors.
261
+
262
+ Args:
263
+ q (`torch.Tensor`): The query tensor.
264
+ k (`torch.Tensor`): The key tensor.
265
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
266
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
267
+ position_ids (`torch.Tensor`, *optional*):
268
+ Deprecated and unused.
269
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
270
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
271
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
272
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
273
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
274
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
275
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
276
+ Returns:
277
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
278
+ """
279
+ cos = cos.unsqueeze(unsqueeze_dim)
280
+ sin = sin.unsqueeze(unsqueeze_dim)
281
+ q_embed = (q * cos) + (rotate_half(q) * sin)
282
+ k_embed = (k * cos) + (rotate_half(k) * sin)
283
+ return q_embed, k_embed
284
+
285
+
286
+ class DeciLMMLP(nn.Module):
287
+ def __init__(self,
288
+ config: DeciLMConfig,
289
+ ffn_config: FFNConfig,
290
+ ):
291
+ super().__init__()
292
+ self.config = config
293
+ self.ffn_config = ffn_config
294
+ self.hidden_size = config.hidden_size
295
+ self.intermediate_size = _ffn_mult_to_intermediate_size(
296
+ ffn_config.ffn_mult, config.hidden_size) # DeciLM-specific code
297
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
298
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
299
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
300
+ self.act_fn = ACT2FN[config.hidden_act]
301
+
302
+ if ffn_config.sparsify is not None:
303
+ self.register_full_backward_hook(sparsity_backward_hook)
304
+
305
+ def forward(self, x):
306
+ if self.config.pretraining_tp > 1:
307
+ slice = self.intermediate_size // self.config.pretraining_tp
308
+ gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)
309
+ up_proj_slices = self.up_proj.weight.split(slice, dim=0)
310
+ down_proj_slices = self.down_proj.weight.split(slice, dim=1)
311
+
312
+ gate_proj = torch.cat(
313
+ [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1
314
+ )
315
+ up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)
316
+
317
+ intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)
318
+ down_proj = [
319
+ F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)
320
+ ]
321
+ down_proj = sum(down_proj)
322
+ else:
323
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
324
+
325
+ return down_proj
326
+
327
+
328
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
329
+ """
330
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
331
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
332
+ """
333
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
334
+ if n_rep == 1:
335
+ return hidden_states
336
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
337
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
338
+
339
+
340
+ class DeciLMAttention(nn.Module):
341
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
342
+
343
+ def __init__(self,
344
+ config: DeciLMConfig,
345
+ attention_config: AttentionConfig,
346
+ layer_idx: Optional[int] = None,
347
+ ):
348
+ super().__init__()
349
+ self.config = config
350
+ self.attention_config = attention_config
351
+ self.layer_idx = layer_idx
352
+ if layer_idx is None:
353
+ logger.warning_once(
354
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
355
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
356
+ "when creating this class."
357
+ )
358
+
359
+ self.attention_dropout = config.attention_dropout
360
+ self.hidden_size = config.hidden_size
361
+ self.num_heads = config.num_attention_heads
362
+ self.head_dim = self.hidden_size // self.num_heads
363
+ self.num_key_value_groups = attention_config.n_heads_in_group # DeciLM-specific code
364
+ self.num_key_value_heads = self.num_heads // self.num_key_value_groups # DeciLM-specific code
365
+ self.max_position_embeddings = config.max_position_embeddings
366
+ self.rope_theta = config.rope_theta
367
+ self.is_causal = True
368
+
369
+ if (self.head_dim * self.num_heads) != self.hidden_size:
370
+ raise ValueError(
371
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
372
+ f" and `num_heads`: {self.num_heads})."
373
+ )
374
+
375
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
376
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
377
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
378
+ self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)
379
+
380
+ # TODO (joao): remove in v4.45 (RoPE is computed in the model, not in the decoder layers)
381
+ self.rotary_emb = DeciLMRotaryEmbedding(config=self.config)
382
+
383
+ if attention_config.sparsify is not None:
384
+ self.register_full_backward_hook(sparsity_backward_hook)
385
+
386
+ def forward(
387
+ self,
388
+ hidden_states: torch.Tensor,
389
+ attention_mask: Optional[torch.Tensor] = None,
390
+ position_ids: Optional[torch.LongTensor] = None,
391
+ past_key_value: Optional[Cache] = None,
392
+ output_attentions: bool = False,
393
+ use_cache: bool = False,
394
+ cache_position: Optional[torch.LongTensor] = None,
395
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.45
396
+ **kwargs,
397
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
398
+ bsz, q_len, _ = hidden_states.size()
399
+ if self.config.pretraining_tp > 1:
400
+ key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
401
+ query_slices = self.q_proj.weight.split(
402
+ (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0
403
+ )
404
+ key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
405
+ value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)
406
+
407
+ query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
408
+ query_states = torch.cat(query_states, dim=-1)
409
+
410
+ key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
411
+ key_states = torch.cat(key_states, dim=-1)
412
+
413
+ value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
414
+ value_states = torch.cat(value_states, dim=-1)
415
+
416
+ else:
417
+ query_states = self.q_proj(hidden_states)
418
+ key_states = self.k_proj(hidden_states)
419
+ value_states = self.v_proj(hidden_states)
420
+
421
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
422
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
423
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
424
+
425
+ if position_embeddings is None:
426
+ logger.warning_once(
427
+ "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
428
+ "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
429
+ "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.45 `position_ids` will be "
430
+ "removed and `position_embeddings` will be mandatory."
431
+ )
432
+ cos, sin = self.rotary_emb(value_states, position_ids)
433
+ else:
434
+ cos, sin = position_embeddings
435
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
436
+
437
+ if past_key_value is not None:
438
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
439
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
440
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
441
+
442
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
443
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
444
+
445
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
446
+
447
+ if attention_mask is not None: # no matter the length, we just slice it
448
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
449
+ attn_weights = attn_weights + causal_mask
450
+
451
+ # upcast attention to fp32
452
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
453
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
454
+ attn_output = torch.matmul(attn_weights, value_states)
455
+
456
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
457
+ raise ValueError(
458
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
459
+ f" {attn_output.size()}"
460
+ )
461
+
462
+ attn_output = attn_output.transpose(1, 2).contiguous()
463
+
464
+ attn_output = attn_output.reshape(bsz, q_len, -1)
465
+
466
+ if self.config.pretraining_tp > 1:
467
+ attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
468
+ o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
469
+ attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
470
+ else:
471
+ attn_output = self.o_proj(attn_output)
472
+
473
+ if not output_attentions:
474
+ attn_weights = None
475
+
476
+ return attn_output, attn_weights, past_key_value
477
+
478
+
479
+ class DeciLMFlashAttention2(DeciLMAttention):
480
+ """
481
+ DeciLM flash attention module. This module inherits from `DeciLMAttention` as the weights of the module stay
482
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
483
+ flash attention and deal with padding tokens in case the input contains any of them.
484
+ """
485
+
486
+ def __init__(self, *args, **kwargs):
487
+ super().__init__(*args, **kwargs)
488
+
489
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
490
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
491
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
492
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
493
+
494
+ self.sliding_window = self.attention_config.prefill_sliding_window
495
+
496
+ def forward(
497
+ self,
498
+ hidden_states: torch.Tensor,
499
+ attention_mask: Optional[torch.LongTensor] = None,
500
+ position_ids: Optional[torch.LongTensor] = None,
501
+ past_key_value: Optional[Cache] = None,
502
+ output_attentions: bool = False,
503
+ use_cache: bool = False,
504
+ cache_position: Optional[torch.LongTensor] = None,
505
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.45
506
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
507
+ output_attentions = False
508
+
509
+ bsz, q_len, _ = hidden_states.size()
510
+
511
+ query_states = self.q_proj(hidden_states)
512
+ key_states = self.k_proj(hidden_states)
513
+ value_states = self.v_proj(hidden_states)
514
+
515
+ # Flash attention requires the input to have the shape
516
+ # batch_size x seq_length x num_heads x head_dim
517
+ # therefore we just need to keep the original shape
518
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
519
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
520
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
521
+
522
+ if position_embeddings is None:
523
+ logger.warning_once(
524
+ "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
525
+ "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
526
+ "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.45 `position_ids` will be "
527
+ "removed and `position_embeddings` will be mandatory."
528
+ )
529
+ cos, sin = self.rotary_emb(value_states, position_ids)
530
+ else:
531
+ cos, sin = position_embeddings
532
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
533
+
534
+ if past_key_value is not None:
535
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
536
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
537
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
538
+
539
+ # TODO: These transposes are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
540
+ # to be able to avoid many of these transpose/reshape/view.
541
+ query_states = query_states.transpose(1, 2)
542
+ key_states = key_states.transpose(1, 2)
543
+ value_states = value_states.transpose(1, 2)
544
+
545
+ dropout_rate = self.attention_dropout if self.training else 0.0
546
+
547
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
548
+ # therefore the input hidden states get silently cast to float32. Hence, we need
549
+ # to cast them back to the correct dtype just to be sure everything works as expected.
550
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
551
+ # in fp32. (DeciLMRMSNorm handles it correctly)
552
+
553
+ input_dtype = query_states.dtype
554
+ if input_dtype == torch.float32:
555
+ if torch.is_autocast_enabled():
556
+ target_dtype = torch.get_autocast_gpu_dtype()
557
+ # Handle the case where the model is quantized
558
+ elif hasattr(self.config, "_pre_quantization_dtype"):
559
+ target_dtype = self.config._pre_quantization_dtype
560
+ else:
561
+ target_dtype = self.q_proj.weight.dtype
562
+
563
+ logger.warning_once(
564
+ f"The input hidden states seem to have been silently cast to float32; this might be related to"
565
+ f" the fact that you have upcast embedding or layer norm layers to float32. We will cast the input back to"
566
+ f" {target_dtype}."
567
+ )
568
+
569
+ query_states = query_states.to(target_dtype)
570
+ key_states = key_states.to(target_dtype)
571
+ value_states = value_states.to(target_dtype)
572
+
573
+ attn_output = _flash_attention_forward(
574
+ query_states,
575
+ key_states,
576
+ value_states,
577
+ attention_mask,
578
+ q_len,
579
+ position_ids=position_ids,
580
+ dropout=dropout_rate,
581
+ sliding_window=self.sliding_window,
582
+ use_top_left_mask=self._flash_attn_uses_top_left_mask,
583
+ is_causal=self.is_causal,
584
+ )
585
+
586
+ attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
587
+ attn_output = self.o_proj(attn_output)
588
+
589
+ if not output_attentions:
590
+ attn_weights = None
591
+
592
+ return attn_output, attn_weights, past_key_value
593
+
594
+
595
+ DECILM_ATTENTION_CLASSES = {
596
+ "eager": DeciLMAttention,
597
+ "flash_attention_2": DeciLMFlashAttention2,
598
+ }
599
+
600
+
601
+ class DeciLMDecoderLayer(nn.Module):
602
+ # DeciLM-specific code
603
+ def __init__(self, config: DeciLMConfig, layer_idx: int):
604
+ super().__init__()
605
+ self.config = config
606
+ self.hidden_size = config.hidden_size
607
+ self.block_config = config.block_configs[layer_idx]
608
+ self.attention_config = self.block_config.attention
609
+ self.ffn_config = self.block_config.ffn
610
+
611
+ if not self.attention_config.no_op:
612
+ self.input_layernorm = DeciLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
613
+ if not self.attention_config.replace_with_linear:
614
+ self.self_attn = DECILM_ATTENTION_CLASSES[config._attn_implementation](
615
+ config=config, attention_config=self.attention_config, layer_idx=layer_idx)
616
+ else:
617
+ self.self_attn = DeciLMLinearAttention(config)
618
+
619
+ if not self.ffn_config.no_op:
620
+ self.post_attention_layernorm = DeciLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
621
+ if not self.ffn_config.replace_with_linear:
622
+ self.mlp = DeciLMMLP(config, self.ffn_config)
623
+ else:
624
+ self.mlp = DeciLMLinearMLP(config)
625
+
626
+ self.is_sliding = self.attention_config.is_sliding
627
+ self.sliding_window = self.attention_config.prefill_sliding_window
628
+
629
+ def forward(
630
+ self,
631
+ hidden_states: torch.Tensor,
632
+ attention_mask: Optional[torch.Tensor] = None,
633
+ position_ids: Optional[torch.LongTensor] = None,
634
+ past_key_value: Optional[Cache] = None,
635
+ output_attentions: Optional[bool] = False,
636
+ use_cache: Optional[bool] = False,
637
+ cache_position: Optional[torch.LongTensor] = None,
638
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.45
639
+ **kwargs,
640
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
641
+ """
642
+ Args:
643
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
644
+ attention_mask (`torch.FloatTensor`, *optional*):
645
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
646
+ query_sequence_length, key_sequence_length)` if default attention is used.
647
+ output_attentions (`bool`, *optional*):
648
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
649
+ returned tensors for more detail.
650
+ use_cache (`bool`, *optional*):
651
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
652
+ (see `past_key_values`).
653
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
654
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
655
+ Indices depicting the position of the input sequence tokens in the sequence
656
+ position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
657
+ Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
658
+ with `head_dim` being the embedding dimension of each attention head.
659
+ kwargs (`dict`, *optional*):
660
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
661
+ into the model
662
+ """
663
+ if self.attention_config.unshifted_sink and self.attention_config.is_sink:
664
+ attention_mask = self._unshifted_sink_mask(
665
+ attention_mask, hidden_states,
666
+ self.attention_config.window_length, self.attention_config.num_sink_tokens)
667
+ else:
668
+ attention_mask = self._gemma2_window_mask(attention_mask, hidden_states, past_key_value)
669
+
670
+ self_attn_weights = None
671
+ present_key_value = past_key_value
672
+ if self.attention_config.no_op:
673
+ pass
674
+ elif self.attention_config.replace_with_linear:
675
+ residual = hidden_states
676
+ hidden_states = self.input_layernorm(hidden_states)
677
+ hidden_states = self.self_attn(hidden_states)
678
+ hidden_states = residual + hidden_states
679
+ else:
680
+ residual = hidden_states
681
+ hidden_states = self.input_layernorm(hidden_states)
682
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
683
+ hidden_states=hidden_states,
684
+ attention_mask=attention_mask,
685
+ position_ids=position_ids,
686
+ past_key_value=past_key_value,
687
+ output_attentions=output_attentions,
688
+ use_cache=use_cache,
689
+ cache_position=cache_position,
690
+ position_embeddings=position_embeddings,
691
+ **kwargs,
692
+ )
693
+ hidden_states = residual + hidden_states
694
+
695
+ if not self.ffn_config.no_op:
696
+ residual = hidden_states
697
+ hidden_states = self.post_attention_layernorm(hidden_states)
698
+ hidden_states = self.mlp(hidden_states)
699
+ hidden_states = residual + hidden_states
700
+
701
+ outputs = (hidden_states,)
702
+
703
+ if output_attentions:
704
+ outputs += (self_attn_weights,)
705
+
706
+ if use_cache:
707
+ outputs += (present_key_value,)
708
+
709
+ return outputs
710
+
711
+ def _gemma2_window_mask(self,
712
+ attention_mask: Optional[torch.Tensor],
713
+ hidden_states: torch.Tensor,
714
+ past_key_value: Optional[VariableCache],
715
+ ) -> Optional[torch.Tensor]:
716
+ if self.is_sliding and attention_mask is not None: # efficient SDPA and no padding
717
+ # With flash attention, the attention mask is a 2D tensor
718
+ if self.config._attn_implementation == "flash_attention_2":
719
+ if past_key_value is not None: # when decoding
720
+ attention_mask = attention_mask[:, -self.sliding_window:]
721
+ else:
722
+ min_dtype = torch.finfo(hidden_states.dtype).min
723
+ sliding_window_mask = torch.tril(
724
+ torch.ones_like(attention_mask, dtype=torch.bool), diagonal=-self.sliding_window
725
+ )
726
+ attention_mask = torch.where(sliding_window_mask, min_dtype, attention_mask)
727
+ if attention_mask.shape[-1] <= 1: # when decoding
728
+ attention_mask = attention_mask[:, :, :, -self.sliding_window:]
729
+ return attention_mask
730
+
731
+ def _unshifted_sink_mask(self,
732
+ attention_mask: torch.Tensor,
733
+ hidden_states: torch.Tensor,
734
+ window_length: int,
735
+ num_sink_tokens: Optional[int],
736
+ ) -> torch.Tensor:
737
+ assert self.config._attn_implementation == "eager", "Unshifted sink is only supported in 'eager' mode."
738
+ assert attention_mask is not None, "The attention mask seems to not be prepared"
739
+
740
+ attention_mask = attention_mask.clone()
741
+ min_dtype = torch.finfo(hidden_states.dtype).min
742
+
743
+ if window_length == 0:
744
+ attention_mask = torch.full_like(attention_mask, fill_value=min_dtype)
745
+ else:
746
+ query_length = attention_mask.shape[-2]
747
+ is_decode = (query_length == 1)
748
+ if is_decode:
749
+ attention_mask[:, :, :, :-window_length] = min_dtype
750
+ else:
751
+ sliding_window_mask = torch.tril(
752
+ torch.ones_like(attention_mask, dtype=torch.bool), diagonal=-window_length
753
+ )
754
+ attention_mask = torch.where(sliding_window_mask, min_dtype, attention_mask)
755
+
756
+ attention_mask[:, :, :, :num_sink_tokens] = 0
757
+ return attention_mask
758
+
759
+
760
+ DECILM_START_DOCSTRING = r"""
761
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
762
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
763
+ etc.)
764
+
765
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
766
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
767
+ and behavior.
768
+
769
+ Parameters:
770
+ config ([`DeciLMConfig`]):
771
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
772
+ load the weights associated with the model, only the configuration. Check out the
773
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
774
+ """
775
+
776
+
777
+ @add_start_docstrings(
778
+ "The bare DeciLM Model outputting raw hidden-states without any specific head on top.",
779
+ DECILM_START_DOCSTRING,
780
+ )
781
+ class DeciLMPreTrainedModel(PreTrainedModel):
782
+ config_class = DeciLMConfig
783
+ base_model_prefix = "model"
784
+ supports_gradient_checkpointing = True
785
+ _no_split_modules = ["DeciLMDecoderLayer"]
786
+ _skip_keys_device_placement = ["past_key_values"]
787
+ _supports_flash_attn_2 = True
788
+ _supports_sdpa = False
789
+ _supports_cache_class = True
790
+ _supports_quantized_cache = False
791
+ _supports_static_cache = True
792
+
793
+ def _init_weights(self, module):
794
+ std = self.config.initializer_range
795
+ if isinstance(module, nn.Linear):
796
+ module.weight.data.normal_(mean=0.0, std=std)
797
+ if module.bias is not None:
798
+ module.bias.data.zero_()
799
+ elif isinstance(module, nn.Embedding):
800
+ module.weight.data.normal_(mean=0.0, std=std)
801
+ if module.padding_idx is not None:
802
+ module.weight.data[module.padding_idx].zero_()
803
+
804
+ def _prepare_generation_config(
805
+ self, generation_config: Optional[GenerationConfig], **kwargs: dict
806
+ ) -> tuple[GenerationConfig, dict]:
807
+ # DeciLM-specific code
808
+ generation_config, model_kwargs = super()._prepare_generation_config(generation_config, **kwargs)
809
+ generation_config.cache_implementation = "variable"
810
+ NEED_SETUP_CACHE_CLASSES_MAPPING["variable"] = VariableCache
811
+ return generation_config, model_kwargs
812
+
813
+
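For context, a hedged end-to-end usage sketch (the repository id is a placeholder; `trust_remote_code=True` is required so this custom modeling code is loaded, and `generate()` registers the `VariableCache` via `_prepare_generation_config` above, so no cache object needs to be passed explicitly):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "path/to/this-repo"  # placeholder: actual repository id or local path
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, trust_remote_code=True, torch_dtype="auto", device_map="auto"
)

inputs = tokenizer("The capital of France is", return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```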
814
+ DECILM_INPUTS_DOCSTRING = r"""
815
+ Args:
816
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
817
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
818
+ it.
819
+
820
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
821
+ [`PreTrainedTokenizer.__call__`] for details.
822
+
823
+ [What are input IDs?](../glossary#input-ids)
824
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
825
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
826
+
827
+ - 1 for tokens that are **not masked**,
828
+ - 0 for tokens that are **masked**.
829
+
830
+ [What are attention masks?](../glossary#attention-mask)
831
+
832
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
833
+ [`PreTrainedTokenizer.__call__`] for details.
834
+
835
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
836
+ `past_key_values`).
837
+
838
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
839
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
840
+ information on the default strategy.
841
+
842
+ - 1 indicates the head is **not masked**,
843
+ - 0 indicates the head is **masked**.
844
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
845
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
846
+ config.n_positions - 1]`.
847
+
848
+ [What are position IDs?](../glossary#position-ids)
849
+ past_key_values (`VariableCache`, *optional*):
850
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
851
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
852
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
853
+
854
+ If passed to the forward function, past_key_values must be a VariableCache object (see imports).
855
+ For generation purposes, this is already handled inside model.generate().
856
+
857
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
858
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
859
+ of shape `(batch_size, sequence_length)`.
860
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
861
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
862
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
863
+ model's internal embedding lookup matrix.
864
+ use_cache (`bool`, *optional*):
865
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
866
+ `past_key_values`).
867
+ output_attentions (`bool`, *optional*):
868
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
869
+ tensors for more detail.
870
+ output_hidden_states (`bool`, *optional*):
871
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
872
+ more detail.
873
+ return_dict (`bool`, *optional*):
874
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
875
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
876
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
877
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
878
+ the complete sequence length.
879
+ """
880
+
881
+
882
+ @add_start_docstrings(
883
+ "The bare DeciLM Model outputting raw hidden-states without any specific head on top.",
884
+ DECILM_START_DOCSTRING,
885
+ )
886
+ class DeciLMModel(DeciLMPreTrainedModel):
887
+ """
888
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`DeciLMDecoderLayer`]
889
+
890
+ Args:
891
+ config: DeciLMConfig
892
+ """
893
+
894
+ def __init__(self, config: DeciLMConfig):
895
+ super().__init__(config)
896
+ self.padding_idx = config.pad_token_id
897
+ self.vocab_size = config.vocab_size
898
+
899
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
900
+ self.layers = nn.ModuleList(
901
+ [DeciLMDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
902
+ )
903
+ self.norm = DeciLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
904
+ self.rotary_emb = DeciLMRotaryEmbedding(config=config)
905
+ self.gradient_checkpointing = False
906
+
907
+ # Initialize weights and apply final processing
908
+ self.post_init()
909
+
910
+ def get_input_embeddings(self):
911
+ return self.embed_tokens
912
+
913
+ def set_input_embeddings(self, value):
914
+ self.embed_tokens = value
915
+
916
+ @add_start_docstrings_to_model_forward(DECILM_INPUTS_DOCSTRING)
917
+ def forward(
918
+ self,
919
+ input_ids: torch.LongTensor = None,
920
+ attention_mask: Optional[torch.Tensor] = None,
921
+ position_ids: Optional[torch.LongTensor] = None,
922
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
923
+ inputs_embeds: Optional[torch.FloatTensor] = None,
924
+ use_cache: Optional[bool] = None,
925
+ output_attentions: Optional[bool] = None,
926
+ output_hidden_states: Optional[bool] = None,
927
+ return_dict: Optional[bool] = None,
928
+ cache_position: Optional[torch.LongTensor] = None,
929
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
930
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
931
+ output_hidden_states = (
932
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
933
+ )
934
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
935
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
936
+
937
+ if (input_ids is None) ^ (inputs_embeds is not None):
938
+ raise ValueError(
939
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
940
+ )
941
+
942
+ if self.gradient_checkpointing and self.training and use_cache:
943
+ logger.warning_once(
944
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
945
+ )
946
+ use_cache = False
947
+
948
+ if inputs_embeds is None:
949
+ inputs_embeds = self.embed_tokens(input_ids)
950
+
951
+ is_legacy_cache_format = (past_key_values is not None) and not isinstance(past_key_values, Cache)
952
+ if is_legacy_cache_format:
953
+ raise NotImplementedError("DeciLMModel does not support legacy cache format, please use a newer "
954
+ "transformers version or use VariableCache explicitly (see import in this file).")
955
+
956
+ if cache_position is None:
957
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
958
+ cache_position = torch.arange(
959
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
960
+ )
961
+ if position_ids is None:
962
+ position_ids = cache_position.unsqueeze(0)
963
+
964
+ causal_mask = self._update_causal_mask(
965
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
966
+ )
967
+ hidden_states = inputs_embeds
968
+
969
+ # create position embeddings to be shared across the decoder layers
970
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
971
+
972
+ # decoder layers
973
+ all_hidden_states = () if output_hidden_states else None
974
+ all_self_attns = () if output_attentions else None
975
+ next_decoder_cache = None
976
+
977
+ for decoder_layer in self.layers:
978
+ if output_hidden_states:
979
+ all_hidden_states += (hidden_states,)
980
+
981
+ if self.gradient_checkpointing and self.training:
982
+ layer_outputs = self._gradient_checkpointing_func(
983
+ decoder_layer.__call__,
984
+ hidden_states,
985
+ causal_mask,
986
+ position_ids,
987
+ past_key_values,
988
+ output_attentions,
989
+ use_cache,
990
+ cache_position,
991
+ position_embeddings,
992
+ )
993
+ else:
994
+ layer_outputs = decoder_layer(
995
+ hidden_states,
996
+ attention_mask=causal_mask,
997
+ position_ids=position_ids,
998
+ past_key_value=past_key_values,
999
+ output_attentions=output_attentions,
1000
+ use_cache=use_cache,
1001
+ cache_position=cache_position,
1002
+ position_embeddings=position_embeddings,
1003
+ )
1004
+
1005
+ hidden_states = layer_outputs[0]
1006
+
1007
+ if use_cache:
1008
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1009
+
1010
+ if output_attentions:
1011
+ all_self_attns += (layer_outputs[1],)
1012
+
1013
+ hidden_states = self.norm(hidden_states)
1014
+
1015
+ # add hidden states from the last decoder layer
1016
+ if output_hidden_states:
1017
+ all_hidden_states += (hidden_states,)
1018
+
1019
+ next_cache = next_decoder_cache if use_cache else None
1020
+
1021
+ if not return_dict:
1022
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1023
+ return BaseModelOutputWithPast(
1024
+ last_hidden_state=hidden_states,
1025
+ past_key_values=next_cache,
1026
+ hidden_states=all_hidden_states,
1027
+ attentions=all_self_attns,
1028
+ )
1029
+
1030
+ def _update_causal_mask(
1031
+ self,
1032
+ attention_mask: torch.Tensor,
1033
+ input_tensor: torch.Tensor,
1034
+ cache_position: torch.Tensor,
1035
+ past_key_values: Cache,
1036
+ output_attentions: bool,
1037
+ ):
1038
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
1039
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
1040
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
1041
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
1042
+
1043
+ if self.config._attn_implementation == "flash_attention_2":
1044
+ if attention_mask is not None and 0.0 in attention_mask:
1045
+ return attention_mask
1046
+ return None
1047
+
1048
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
1049
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
1050
+ # to infer the attention mask.
1051
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1052
+ assert not isinstance(past_key_values, StaticCache), "DeciLM does not support StaticCache"
1053
+ using_static_cache = isinstance(past_key_values, StaticCache)
1054
+
1055
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
1056
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
1057
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
1058
+ attention_mask,
1059
+ inputs_embeds=input_tensor,
1060
+ past_key_values_length=past_seen_tokens,
1061
+ is_training=self.training,
1062
+ ) and all([not layer.is_sliding for layer in self.layers]):
1063
+ return None
1064
+
1065
+ dtype, device = input_tensor.dtype, input_tensor.device
1066
+ min_dtype = torch.finfo(dtype).min
1067
+ sequence_length = input_tensor.shape[1]
1068
+ if using_static_cache:
1069
+ target_length = past_key_values.get_max_length()
1070
+ else:
1071
+ target_length = (
1072
+ attention_mask.shape[-1]
1073
+ if isinstance(attention_mask, torch.Tensor)
1074
+ else past_seen_tokens + sequence_length + 1
1075
+ )
1076
+
1077
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
1078
+ causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(
1079
+ attention_mask,
1080
+ sequence_length=sequence_length,
1081
+ target_length=target_length,
1082
+ dtype=dtype,
1083
+ device=device,
1084
+ min_dtype=min_dtype,
1085
+ cache_position=cache_position,
1086
+ batch_size=input_tensor.shape[0],
1087
+ )
1088
+
1089
+ if (
1090
+ self.config._attn_implementation == "sdpa"
1091
+ and attention_mask is not None
1092
+ and attention_mask.device.type == "cuda"
1093
+ and not output_attentions
1094
+ ):
1095
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1096
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1097
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1098
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1099
+
1100
+ return causal_mask
1101
+
1102
+
1103
+ class DeciLMForCausalLM(DeciLMPreTrainedModel, GenerationMixin):
1104
+ _tied_weights_keys = ["lm_head.weight"]
1105
+
1106
+ def __init__(self, config):
1107
+ super().__init__(config)
1108
+ self.model = DeciLMModel(config)
1109
+ self.vocab_size = config.vocab_size
1110
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1111
+
1112
+ # Initialize weights and apply final processing
1113
+ self.post_init()
1114
+
1115
+ def get_input_embeddings(self):
1116
+ return self.model.embed_tokens
1117
+
1118
+ def set_input_embeddings(self, value):
1119
+ self.model.embed_tokens = value
1120
+
1121
+ def get_output_embeddings(self):
1122
+ return self.lm_head
1123
+
1124
+ def set_output_embeddings(self, new_embeddings):
1125
+ self.lm_head = new_embeddings
1126
+
1127
+ def set_decoder(self, decoder):
1128
+ self.model = decoder
1129
+
1130
+ def get_decoder(self):
1131
+ return self.model
1132
+
1133
+ @add_start_docstrings_to_model_forward(DECILM_INPUTS_DOCSTRING)
1134
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1135
+ def forward(
1136
+ self,
1137
+ input_ids: torch.LongTensor = None,
1138
+ attention_mask: Optional[torch.Tensor] = None,
1139
+ position_ids: Optional[torch.LongTensor] = None,
1140
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1141
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1142
+ labels: Optional[torch.LongTensor] = None,
1143
+ use_cache: Optional[bool] = None,
1144
+ output_attentions: Optional[bool] = None,
1145
+ output_hidden_states: Optional[bool] = None,
1146
+ return_dict: Optional[bool] = None,
1147
+ cache_position: Optional[torch.LongTensor] = None,
1148
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1149
+ r"""
1150
+ Args:
1151
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1152
+ Labels for computing the language modeling loss. Indices should either be in `[0, ...,
1153
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1154
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1155
+
1156
+ Return:
1157
+ """
1158
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1159
+ output_hidden_states = (
1160
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1161
+ )
1162
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1163
+
1164
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1165
+ outputs = self.model(
1166
+ input_ids=input_ids,
1167
+ attention_mask=attention_mask,
1168
+ position_ids=position_ids,
1169
+ past_key_values=past_key_values,
1170
+ inputs_embeds=inputs_embeds,
1171
+ use_cache=use_cache,
1172
+ output_attentions=output_attentions,
1173
+ output_hidden_states=output_hidden_states,
1174
+ return_dict=return_dict,
1175
+ cache_position=cache_position,
1176
+ )
1177
+
1178
+ hidden_states = outputs[0]
1179
+ if self.config.pretraining_tp > 1:
1180
+ lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
1181
+ logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
1182
+ logits = torch.cat(logits, dim=-1)
1183
+ else:
1184
+ logits = self.lm_head(hidden_states)
1185
+ logits = logits.float()
1186
+
1187
+ loss = None
1188
+ if labels is not None:
1189
+ # Shift so that tokens < n predict n
1190
+ shift_logits = logits[..., :-1, :].contiguous()
1191
+ shift_labels = labels[..., 1:].contiguous()
1192
+ # Flatten the tokens
1193
+ loss_fct = CrossEntropyLoss()
1194
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1195
+ shift_labels = shift_labels.view(-1)
1196
+ # Enable model parallelism
1197
+ shift_labels = shift_labels.to(shift_logits.device)
1198
+ loss = loss_fct(shift_logits, shift_labels)
1199
+
1200
+ if not return_dict:
1201
+ output = (logits,) + outputs[1:]
1202
+ return (loss,) + output if loss is not None else output
1203
+
1204
+ return CausalLMOutputWithPast(
1205
+ loss=loss,
1206
+ logits=logits,
1207
+ past_key_values=outputs.past_key_values,
1208
+ hidden_states=outputs.hidden_states,
1209
+ attentions=outputs.attentions,
1210
+ )
1211
+
1212
+ def prepare_inputs_for_generation(
1213
+ self,
1214
+ input_ids,
1215
+ past_key_values=None,
1216
+ attention_mask=None,
1217
+ inputs_embeds=None,
1218
+ cache_position=None,
1219
+ position_ids=None,
1220
+ use_cache=True,
1221
+ **kwargs,
1222
+ ):
1223
+ # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
1224
+ # Exception 1: when passing input_embeds, input_ids may be missing entries
1225
+ # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
1226
+ if past_key_values is not None:
1227
+ if inputs_embeds is not None: # Exception 1
1228
+ input_ids = input_ids[:, -cache_position.shape[0]:]
1229
+ elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
1230
+ input_ids = input_ids[:, cache_position]
1231
+
1232
+ if attention_mask is not None and position_ids is None:
1233
+ # create position_ids on the fly for batch generation
1234
+ position_ids = attention_mask.long().cumsum(-1) - 1
1235
+ position_ids.masked_fill_(attention_mask == 0, 1)
1236
+ if past_key_values:
1237
+ position_ids = position_ids[:, -input_ids.shape[1]:]
1238
+
1239
+ # This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s `mode="reduce-overhead"`, as otherwise the input `position_ids` would have varying strides during decoding. Here, simply using `.contiguous()` is not sufficient: in the batch size = 1 case, `position_ids` is already contiguous but with varying stride, which retriggers a capture.
1240
+ position_ids = position_ids.clone(memory_format=torch.contiguous_format)
1241
+
1242
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1243
+ if inputs_embeds is not None and cache_position[0] == 0:
1244
+ model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None}
1245
+ else:
1246
+ # The clone here is for the same reason as for `position_ids`.
1247
+ model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None}
1248
+
1249
+ assert not isinstance(past_key_values, StaticCache), "DeciLM does not support StaticCache"
1250
+ if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2:
1251
+ if model_inputs["inputs_embeds"] is not None:
1252
+ batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
1253
+ device = model_inputs["inputs_embeds"].device
1254
+ else:
1255
+ batch_size, sequence_length = model_inputs["input_ids"].shape
1256
+ device = model_inputs["input_ids"].device
1257
+
1258
+ dtype = self.lm_head.weight.dtype
1259
+ min_dtype = torch.finfo(dtype).min
1260
+
1261
+ attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(
1262
+ attention_mask,
1263
+ sequence_length=sequence_length,
1264
+ target_length=past_key_values.get_max_length(),
1265
+ dtype=dtype,
1266
+ device=device,
1267
+ min_dtype=min_dtype,
1268
+ cache_position=cache_position,
1269
+ batch_size=batch_size,
1270
+ )
1271
+
1272
+ model_inputs.update(
1273
+ {
1274
+ "position_ids": position_ids,
1275
+ "cache_position": cache_position,
1276
+ "past_key_values": past_key_values,
1277
+ "use_cache": use_cache,
1278
+ "attention_mask": attention_mask,
1279
+ }
1280
+ )
1281
+ return model_inputs
1282
+
1283
+ def _maybe_initialize_input_ids_for_generation(
1284
+ self,
1285
+ inputs: Optional[torch.Tensor] = None,
1286
+ bos_token_id: Optional[torch.Tensor] = None,
1287
+ model_kwargs: Optional[dict[str, torch.Tensor]] = None,
1288
+ ) -> torch.LongTensor:
1289
+ """
1290
+ Patch for an HF bug that computes the wrong cache length when only `inputs_embeds` are passed to the model
1291
+ """
1292
+ input_ids = super()._maybe_initialize_input_ids_for_generation(
1293
+ inputs=inputs, bos_token_id=bos_token_id, model_kwargs=model_kwargs)
1294
+ if (
1295
+ "inputs_embeds" in model_kwargs
1296
+ and input_ids is not None
1297
+ and input_ids.shape[1] == 0
1298
+ ):
1299
+ batch_size, input_sequence_length = model_kwargs["inputs_embeds"].shape[:2]
1300
+ input_ids = torch.zeros((batch_size, input_sequence_length), dtype=torch.long, device=self.device)
1301
+ return input_ids
1302
+
1303
+ def generate(
1304
+ self,
1305
+ inputs: Optional[torch.Tensor] = None,
1306
+ *args,
1307
+ **kwargs,
1308
+ ) -> Union[GenerateOutput, torch.LongTensor]:
1309
+ """
1310
+ Patch for an HF bug that computes the wrong cache length when only `inputs_embeds` are passed to the model
1311
+ """
1312
+ only_passed_inputs_embeds = (
1313
+ "inputs_embeds" in kwargs and
1314
+ "input_ids" not in kwargs and
1315
+ inputs is None
1316
+ )
1317
+ if only_passed_inputs_embeds:
1318
+ input_sequence_length = kwargs["inputs_embeds"].shape[1]
1319
+
1320
+ generation_output = super().generate(inputs=inputs, *args, **kwargs)
1321
+
1322
+ if only_passed_inputs_embeds and isinstance(generation_output, torch.Tensor):
1323
+ generation_output = generation_output[:, input_sequence_length:]
1324
+
1325
+ return generation_output
1326
+
1327
+
1328
+ @add_start_docstrings(
1329
+ """
1330
+ The DeciLM Model transformer with a sequence classification head on top (linear layer).
1331
+
1332
+ [`DeciLMForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1333
+ (e.g. GPT-2) do.
1334
+
1335
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1336
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1337
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1338
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1339
+ each row of the batch).
1340
+ """,
1341
+ DECILM_START_DOCSTRING,
1342
+ )
1343
+ class DeciLMForSequenceClassification(DeciLMPreTrainedModel):
1344
+ def __init__(self, config):
1345
+ super().__init__(config)
1346
+ self.num_labels = config.num_labels
1347
+ self.model = DeciLMModel(config)
1348
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1349
+
1350
+ # Initialize weights and apply final processing
1351
+ self.post_init()
1352
+
1353
+ def get_input_embeddings(self):
1354
+ return self.model.embed_tokens
1355
+
1356
+ def set_input_embeddings(self, value):
1357
+ self.model.embed_tokens = value
1358
+
1359
+ @add_start_docstrings_to_model_forward(DECILM_INPUTS_DOCSTRING)
1360
+ def forward(
1361
+ self,
1362
+ input_ids: Optional[torch.LongTensor] = None,
1363
+ attention_mask: Optional[torch.Tensor] = None,
1364
+ position_ids: Optional[torch.LongTensor] = None,
1365
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1366
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1367
+ labels: Optional[torch.LongTensor] = None,
1368
+ use_cache: Optional[bool] = None,
1369
+ output_attentions: Optional[bool] = None,
1370
+ output_hidden_states: Optional[bool] = None,
1371
+ return_dict: Optional[bool] = None,
1372
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1373
+ r"""
1374
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1375
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1376
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1377
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1378
+ """
1379
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1380
+
1381
+ transformer_outputs = self.model(
1382
+ input_ids,
1383
+ attention_mask=attention_mask,
1384
+ position_ids=position_ids,
1385
+ past_key_values=past_key_values,
1386
+ inputs_embeds=inputs_embeds,
1387
+ use_cache=use_cache,
1388
+ output_attentions=output_attentions,
1389
+ output_hidden_states=output_hidden_states,
1390
+ return_dict=return_dict,
1391
+ )
1392
+ hidden_states = transformer_outputs[0]
1393
+ logits = self.score(hidden_states)
1394
+
1395
+ if input_ids is not None:
1396
+ batch_size = input_ids.shape[0]
1397
+ else:
1398
+ batch_size = inputs_embeds.shape[0]
1399
+
1400
+ if self.config.pad_token_id is None and batch_size != 1:
1401
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1402
+ if self.config.pad_token_id is None:
1403
+ sequence_lengths = -1
1404
+ else:
1405
+ if input_ids is not None:
1406
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1407
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1408
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1409
+ sequence_lengths = sequence_lengths.to(logits.device)
1410
+ else:
1411
+ sequence_lengths = -1
1412
+
1413
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1414
+
1415
+ loss = None
1416
+ if labels is not None:
1417
+ labels = labels.to(logits.device)
1418
+ if self.config.problem_type is None:
1419
+ if self.num_labels == 1:
1420
+ self.config.problem_type = "regression"
1421
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1422
+ self.config.problem_type = "single_label_classification"
1423
+ else:
1424
+ self.config.problem_type = "multi_label_classification"
1425
+
1426
+ if self.config.problem_type == "regression":
1427
+ loss_fct = MSELoss()
1428
+ if self.num_labels == 1:
1429
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1430
+ else:
1431
+ loss = loss_fct(pooled_logits, labels)
1432
+ elif self.config.problem_type == "single_label_classification":
1433
+ loss_fct = CrossEntropyLoss()
1434
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1435
+ elif self.config.problem_type == "multi_label_classification":
1436
+ loss_fct = BCEWithLogitsLoss()
1437
+ loss = loss_fct(pooled_logits, labels)
1438
+ if not return_dict:
1439
+ output = (pooled_logits,) + transformer_outputs[1:]
1440
+ return ((loss,) + output) if loss is not None else output
1441
+
1442
+ return SequenceClassifierOutputWithPast(
1443
+ loss=loss,
1444
+ logits=pooled_logits,
1445
+ past_key_values=transformer_outputs.past_key_values,
1446
+ hidden_states=transformer_outputs.hidden_states,
1447
+ attentions=transformer_outputs.attentions,
1448
+ )
1449
+
1450
+
1451
+ @add_start_docstrings(
1452
+ """
1453
+ The DeciLM Model transformer with a span classification head on top for extractive question-answering tasks like
1454
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1455
+ """,
1456
+ DECILM_START_DOCSTRING,
1457
+ )
1458
+ class DeciLMForQuestionAnswering(DeciLMPreTrainedModel):
1459
+ base_model_prefix = "transformer"
1460
+
1461
+ # Copied from transformers.models.bloom.modeling_bloom.BloomForQuestionAnswering.__init__ with Bloom->DeciLM
1462
+ def __init__(self, config):
1463
+ super().__init__(config)
1464
+ self.transformer = DeciLMModel(config)
1465
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
1466
+
1467
+ # Initialize weights and apply final processing
1468
+ self.post_init()
1469
+
1470
+ def get_input_embeddings(self):
1471
+ return self.transformer.embed_tokens
1472
+
1473
+ def set_input_embeddings(self, value):
1474
+ self.transformer.embed_tokens = value
1475
+
1476
+ @add_start_docstrings_to_model_forward(DECILM_INPUTS_DOCSTRING)
1477
+ def forward(
1478
+ self,
1479
+ input_ids: Optional[torch.LongTensor] = None,
1480
+ attention_mask: Optional[torch.FloatTensor] = None,
1481
+ position_ids: Optional[torch.LongTensor] = None,
1482
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1483
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1484
+ start_positions: Optional[torch.LongTensor] = None,
1485
+ end_positions: Optional[torch.LongTensor] = None,
1486
+ output_attentions: Optional[bool] = None,
1487
+ output_hidden_states: Optional[bool] = None,
1488
+ return_dict: Optional[bool] = None,
1489
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1490
+ r"""
1491
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1492
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1493
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1494
+ are not taken into account for computing the loss.
1495
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1496
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1497
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1498
+ are not taken into account for computing the loss.
1499
+ """
1500
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1501
+
1502
+ outputs = self.transformer(
1503
+ input_ids,
1504
+ attention_mask=attention_mask,
1505
+ position_ids=position_ids,
1506
+ past_key_values=past_key_values,
1507
+ inputs_embeds=inputs_embeds,
1508
+ output_attentions=output_attentions,
1509
+ output_hidden_states=output_hidden_states,
1510
+ return_dict=return_dict,
1511
+ )
1512
+
1513
+ sequence_output = outputs[0]
1514
+
1515
+ logits = self.qa_outputs(sequence_output)
1516
+ start_logits, end_logits = logits.split(1, dim=-1)
1517
+ start_logits = start_logits.squeeze(-1).contiguous()
1518
+ end_logits = end_logits.squeeze(-1).contiguous()
1519
+
1520
+ total_loss = None
1521
+ if start_positions is not None and end_positions is not None:
1522
+ # If we are on multi-GPU, splitting adds an extra dimension; squeeze it away
1523
+ if len(start_positions.size()) > 1:
1524
+ start_positions = start_positions.squeeze(-1).to(start_logits.device)
1525
+ if len(end_positions.size()) > 1:
1526
+ end_positions = end_positions.squeeze(-1).to(end_logits.device)
1527
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1528
+ ignored_index = start_logits.size(1)
1529
+ start_positions = start_positions.clamp(0, ignored_index)
1530
+ end_positions = end_positions.clamp(0, ignored_index)
1531
+
1532
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1533
+ start_loss = loss_fct(start_logits, start_positions)
1534
+ end_loss = loss_fct(end_logits, end_positions)
1535
+ total_loss = (start_loss + end_loss) / 2
1536
+
1537
+ if not return_dict:
1538
+ output = (start_logits, end_logits) + outputs[2:]
1539
+ return ((total_loss,) + output) if total_loss is not None else output
1540
+
1541
+ return QuestionAnsweringModelOutput(
1542
+ loss=total_loss,
1543
+ start_logits=start_logits,
1544
+ end_logits=end_logits,
1545
+ hidden_states=outputs.hidden_states,
1546
+ attentions=outputs.attentions,
1547
+ )
1548
+
1549
+
1550
+ @add_start_docstrings(
1551
+ """
1552
+ The DeciLM Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1553
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1554
+ """,
1555
+ DECILM_START_DOCSTRING,
1556
+ )
1557
+ class DeciLMForTokenClassification(DeciLMPreTrainedModel):
1558
+ def __init__(self, config):
1559
+ super().__init__(config)
1560
+ self.num_labels = config.num_labels
1561
+ self.model = DeciLMModel(config)
1562
+ if getattr(config, "classifier_dropout", None) is not None:
1563
+ classifier_dropout = config.classifier_dropout
1564
+ elif getattr(config, "hidden_dropout", None) is not None:
1565
+ classifier_dropout = config.hidden_dropout
1566
+ else:
1567
+ classifier_dropout = 0.1
1568
+ self.dropout = nn.Dropout(classifier_dropout)
1569
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
1570
+
1571
+ # Initialize weights and apply final processing
1572
+ self.post_init()
1573
+
1574
+ def get_input_embeddings(self):
1575
+ return self.model.embed_tokens
1576
+
1577
+ def set_input_embeddings(self, value):
1578
+ self.model.embed_tokens = value
1579
+
1580
+ @add_start_docstrings_to_model_forward(DECILM_INPUTS_DOCSTRING)
1581
+ def forward(
1582
+ self,
1583
+ input_ids: Optional[torch.LongTensor] = None,
1584
+ attention_mask: Optional[torch.Tensor] = None,
1585
+ position_ids: Optional[torch.LongTensor] = None,
1586
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1587
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1588
+ labels: Optional[torch.LongTensor] = None,
1589
+ use_cache: Optional[bool] = None,
1590
+ output_attentions: Optional[bool] = None,
1591
+ output_hidden_states: Optional[bool] = None,
1592
+ return_dict: Optional[bool] = None,
1593
+ ) -> Union[Tuple, TokenClassifierOutput]:
1594
+ r"""
1595
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1596
+ Labels for computing the token classification loss. Indices should be in `[0, ...,
1597
+ config.num_labels - 1]`. A Cross-Entropy loss is computed over every token whose label is in
1598
+ that range.
1599
+ """
1600
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1601
+
1602
+ outputs = self.model(
1603
+ input_ids,
1604
+ attention_mask=attention_mask,
1605
+ position_ids=position_ids,
1606
+ past_key_values=past_key_values,
1607
+ inputs_embeds=inputs_embeds,
1608
+ use_cache=use_cache,
1609
+ output_attentions=output_attentions,
1610
+ output_hidden_states=output_hidden_states,
1611
+ return_dict=return_dict,
1612
+ )
1613
+ sequence_output = outputs[0]
1614
+ sequence_output = self.dropout(sequence_output)
1615
+ logits = self.score(sequence_output)
1616
+
1617
+ loss = None
1618
+ if labels is not None:
1619
+ loss_fct = CrossEntropyLoss()
1620
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1621
+
1622
+ if not return_dict:
1623
+ output = (logits,) + outputs[2:]
1624
+ return ((loss,) + output) if loss is not None else output
1625
+
1626
+ return TokenClassifierOutput(
1627
+ loss=loss,
1628
+ logits=logits,
1629
+ hidden_states=outputs.hidden_states,
1630
+ attentions=outputs.attentions,
1631
+ )
1632
+
1633
+
1634
+ ########################################################################
1635
+ # DeciLM-specific code
1636
+ ########################################################################
1637
+
1638
+
1639
+ def _ffn_mult_to_intermediate_size(ffn_mult: float, n_embd: int) -> int:
1640
+ # DeciLM-specific code
1641
+ intermediate_size = int(2 * ffn_mult * n_embd / 3)
1642
+ return _find_multiple(intermediate_size, 256)
1643
+
1644
+
1645
+ def _find_multiple(n: int, k: int) -> int:
1646
+ # DeciLM-specific code
1647
+ if n % k == 0:
1648
+ return n
1649
+ return n + k - (n % k)
1650
+
1651
+
1652
+ class DeciLMLinearMLP(nn.Module):
1653
+ # DeciLM-specific code
1654
+ def __init__(self,
1655
+ config: DeciLMConfig,
1656
+ ):
1657
+ super().__init__()
1658
+ self.linear_mlp = nn.Linear(in_features=config.hidden_size,
1659
+ out_features=config.hidden_size,
1660
+ bias=False)
1661
+
1662
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
1663
+ return self.linear_mlp.forward(x)
1664
+
1665
+
1666
+ class DeciLMLinearAttention(nn.Module):
1667
+ # DeciLM-specific code
1668
+ def __init__(self,
1669
+ config: DeciLMConfig,
1670
+ ):
1671
+ super().__init__()
1672
+ self.linear_attn = nn.Linear(in_features=config.hidden_size,
1673
+ out_features=config.hidden_size,
1674
+ bias=False)
1675
+
1676
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
1677
+ return self.linear_attn.forward(x)
1678
+
1679
+
1680
+ def sparsity_backward_hook(*args, **kwargs):
1681
+ raise NotImplementedError("No support for sparsity when training HF DeciLM (inference is ok though)")
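As a quick orientation on the DeciLM-specific sizing helpers at the end of modeling_decilm.py above: the FFN intermediate width is two thirds of `ffn_mult * hidden_size`, rounded up to a multiple of 256. The sketch below simply replays that arithmetic; the `ffn_mult` values are hypothetical examples, not values read from this checkpoint's config.

    def _find_multiple(n: int, k: int) -> int:
        # round n up to the nearest multiple of k
        return n if n % k == 0 else n + k - (n % k)

    def _ffn_mult_to_intermediate_size(ffn_mult: float, n_embd: int) -> int:
        return _find_multiple(int(2 * ffn_mult * n_embd / 3), 256)

    # hypothetical blocks on the 16384-wide hidden size used by this model
    print(_ffn_mult_to_intermediate_size(2.625, 16384))  # 28672 (already a multiple of 256)
    print(_ffn_mult_to_intermediate_size(3.0, 16384))    # 32768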
nemo_common.json ADDED
@@ -0,0 +1 @@
1
+ {}
nemo_model_config.yaml ADDED
@@ -0,0 +1,211 @@
1
+ restore_from_path: null
2
+ restore_from_ckpt: null
3
+ mcore_gpt: true
4
+ micro_batch_size: 1
5
+ global_batch_size: 288
6
+ tensor_model_parallel_size: 8
7
+ pipeline_model_parallel_size: 18
8
+ virtual_pipeline_model_parallel_size: null
9
+ encoder_seq_length: 17408
10
+ max_position_embeddings: 17408
11
+ num_layers: 162
12
+ hidden_size: 16384
13
+ ffn_hidden_size: 11008
14
+ num_attention_heads: 128
15
+ init_method_std: 0.02
16
+ use_scaled_init_method: true
17
+ hidden_dropout: 0.0
18
+ attention_dropout: 0.0
19
+ ffn_dropout: 0.0
20
+ kv_channels: null
21
+ apply_query_key_layer_scaling: true
22
+ normalization: rmsnorm
23
+ layernorm_epsilon: 1.0e-05
24
+ do_layer_norm_weight_decay: false
25
+ make_vocab_size_divisible_by: 128
26
+ pre_process: true
27
+ post_process: true
28
+ persist_layer_norm: true
29
+ bias: false
30
+ activation: fast-swiglu
31
+ headscale: false
32
+ transformer_block_type: pre_ln
33
+ openai_gelu: false
34
+ normalize_attention_scores: true
35
+ position_embedding_type: rope
36
+ rotary_percentage: 1.0
37
+ attention_type: multihead
38
+ share_embeddings_and_output_weights: false
39
+ overlap_p2p_comm: false
40
+ batch_p2p_comm: true
41
+ num_query_groups: 8
42
+ tokenizer:
43
+ library: huggingface
44
+ type: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
45
+ use_fast: true
46
+ native_amp_init_scale: 4294967296
47
+ native_amp_growth_interval: 1000
48
+ hysteresis: 2
49
+ fp32_residual_connection: false
50
+ fp16_lm_cross_entropy: false
51
+ megatron_amp_O2: true
52
+ grad_allreduce_chunk_size_mb: 125
53
+ grad_div_ar_fusion: true
54
+ gradient_accumulation_fusion: false
55
+ bias_activation_fusion: false
56
+ bias_dropout_add_fusion: false
57
+ masked_softmax_fusion: true
58
+ get_attention_mask_from_fusion: true
59
+ apply_rope_fusion: false
60
+ seed: 1234
61
+ resume_from_checkpoint: null
62
+ use_cpu_initialization: false
63
+ onnx_safe: false
64
+ apex_transformer_log_level: 30
65
+ gradient_as_bucket_view: true
66
+ sync_batch_comm: false
67
+ activations_checkpoint_granularity: full
68
+ activations_checkpoint_method: uniform
69
+ activations_checkpoint_num_layers: 1
70
+ num_micro_batches_with_partial_activation_checkpoints: null
71
+ activations_checkpoint_layers_per_pipeline: null
72
+ sequence_parallel: true
73
+ transformer_engine: true
74
+ fp8: false
75
+ fp8_e4m3: false
76
+ fp8_hybrid: true
77
+ fp8_margin: 0
78
+ fp8_interval: 1
79
+ fp8_amax_history_len: 1024
80
+ fp8_amax_compute_algo: max
81
+ reduce_amax: true
82
+ use_emha: false
83
+ data:
84
+ index_mapping_dir: null
85
+ data_impl: jsonl
86
+ splits_string: null
87
+ seq_length: 17408
88
+ skip_warmup: true
89
+ num_workers: 0
90
+ dataloader_type: single
91
+ reset_position_ids: false
92
+ reset_attention_mask: false
93
+ eod_mask_loss: false
94
+ validation_drop_last: true
95
+ no_seqlen_plus_one_input_tokens: false
96
+ pad_samples_to_global_batch_size: false
97
+ shuffle_documents: true
98
+ apply_chat_template: false
99
+ prompt_file: null
100
+ system_prompt_file: null
101
+ shuffle_train_data: false
102
+ system_prompt: detailed thinking off
103
+ data_prefix:
104
+ train:
105
+ - /lustre/fsw/portfolios/llmservice/users/jiaqiz/data/reinforce/hs2/hs2.multiturn.rl.sys12.train.jsonl
106
+ validation:
107
+ - /lustre/fsw/portfolios/llmservice/users/jiaqiz/data/reinforce/hs2/hs2.multiturn.rl.sys12.val.jsonl
108
+ test:
109
+ - /lustre/fsw/portfolios/llmservice/users/jiaqiz/data/reinforce/hs2/hs2.multiturn.rl.sys12.val.jsonl
110
+ nsys_profile:
111
+ enabled: false
112
+ start_step: 10
113
+ end_step: 10
114
+ ranks:
115
+ - 0
116
+ gen_shape: false
117
+ optim:
118
+ name: distributed_fused_adam
119
+ lr: 3.00001e-07
120
+ weight_decay: 0.1
121
+ betas:
122
+ - 0.9
123
+ - 0.98
124
+ sched:
125
+ name: CosineAnnealing
126
+ warmup_steps: 10
127
+ constant_steps: 1000
128
+ min_lr: 3.0e-07
129
+ max_steps: 3802
130
+ bucket_cap_mb: 200
131
+ overlap_grad_sync: false
132
+ overlap_param_sync: false
133
+ contiguous_grad_buffer: true
134
+ rotary_base: 500000.0
135
+ scale_positional_embedding: true
136
+ seq_len_interpolation_factor: null
137
+ heterogeneous_layers_config_path: /home/boryiings/lustre/aligner/253B/checkpoints/sft_step1800_nemo/NeMo/config.json
138
+ name: heterogeneous_gpt
139
+ precision: bf16
140
+ hf_model_name_or_configs_dir: /lustre/fs1/portfolios/llmservice/users/jiaqiz/results/253b-id-step14-diffrank-evelina-gpqa-scp116k-aops-llmjudge-prompt2-long-16klen-lr5e7-72nodes/checkpoints/HF/step21
141
+ grpo:
142
+ share_dir: /dev/shm/checkpoints_2248637
143
+ forward_micro_batch_size: 1
144
+ offload_adam_states: true
145
+ ratio_eps: 0.2
146
+ sampling_params:
147
+ use_greedy: false
148
+ temperature: 1
149
+ top_k: -1
150
+ top_p: 1.0
151
+ repetition_penalty: 1.0
152
+ add_BOS: false
153
+ all_probs: false
154
+ compute_logprob: false
155
+ end_strings:
156
+ - <|endoftext|>
157
+ - <extra_id_1>
158
+ length_params:
159
+ max_length: 16384
160
+ min_length: 1
161
+ generation_rollout_mbs: 16
162
+ trt_model_dir: /tmp/trt_llm_model
163
+ initial_policy_kl_penalty: 0.0001
164
+ inference_backend:
165
+ type: vllm
166
+ enable: true
167
+ seed: 1234
168
+ max_input_len: 1024
169
+ reshard: true
170
+ config:
171
+ trt_llm:
172
+ enable: false
173
+ model_type: llama
174
+ unload_engine_train: false
175
+ vllm:
176
+ enable: true
177
+ port: 4321
178
+ ip: cw-dfw-h100-001-262-012
179
+ trt_llm_pytorch:
180
+ enable: false
181
+ port: 4321
182
+ ip: localhost
183
+ peft:
184
+ peft_scheme: none
185
+ restore_from_path: null
186
+ restore_from_ckpt:
187
+ checkpoint_dir: null
188
+ checkpoint_name: null
189
+ lora_tuning:
190
+ target_modules:
191
+ - attention_qkv
192
+ adapter_dim: 32
193
+ adapter_dropout: 0.0
194
+ column_init_method: xavier
195
+ row_init_method: zero
196
+ layer_selection: null
197
+ weight_tying: false
198
+ position_embedding_strategy: null
199
+ context_parallel_size: 2
200
+ dist_ckpt_format: torch_dist
201
+ dist_ckpt_load_on_device: true
202
+ dist_ckpt_parallel_save: true
203
+ dist_ckpt_parallel_save_within_dp: false
204
+ dist_ckpt_parallel_load: false
205
+ dist_ckpt_torch_dist_multiproc: 2
206
+ dist_ckpt_assume_constant_structure: false
207
+ dist_ckpt_parallel_dist_opt: true
208
+ dist_ckpt_load_strictness: log_all
209
+ deallocate_pipeline_outputs: false
210
+ target: nemo_aligner.experimental.grpo.models.nlp.gpt.megatron_gpt_grpo_actor.MegatronGPTActorModel
211
+ nemo_version: 2.2.0rc0
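For readers trying to relate the parallelism fields in nemo_model_config.yaml to one another, a minimal sketch follows. Only `world_size` is an assumption (the config does not record the total GPU count; 72 nodes with 8 GPUs each is used purely for illustration); every other number is taken from the YAML above.

    # editor's sketch, not part of the uploaded files
    tensor_parallel = 8      # tensor_model_parallel_size
    pipeline_parallel = 18   # pipeline_model_parallel_size
    context_parallel = 2     # context_parallel_size
    micro_batch = 1          # micro_batch_size
    global_batch = 288       # global_batch_size

    gpus_per_replica = tensor_parallel * pipeline_parallel * context_parallel  # 288 GPUs per model replica
    world_size = 72 * 8                                                        # assumed total GPU count
    data_parallel = world_size // gpus_per_replica                             # 2 data-parallel replicas
    accumulation_steps = global_batch // (micro_batch * data_parallel)         # 144 micro-batches per step
    print(gpus_per_replica, data_parallel, accumulation_steps)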
quantization_config.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,16 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<|begin_of_text|>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|eot_id|>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ }
16
+ }
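special_tokens_map.json above pins the Llama-3-style control tokens into the tokenizer's bos/eos slots; note that the end-of-sequence token is `<|eot_id|>` rather than `<|end_of_text|>`, so that is what generation stops on by default. A minimal check (editor's sketch; the path is a placeholder for a local copy of this repository):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("./path/to/this/repo")
    print(tok.bos_token)  # <|begin_of_text|>
    print(tok.eos_token)  # <|eot_id|>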
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b
3
+ size 17209920
tokenizer_config.json ADDED
@@ -0,0 +1,2063 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "128000": {
4
+ "content": "<|begin_of_text|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "128001": {
12
+ "content": "<|end_of_text|>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "128002": {
20
+ "content": "<|reserved_special_token_0|>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "128003": {
28
+ "content": "<|reserved_special_token_1|>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "128004": {
36
+ "content": "<|finetune_right_pad_id|>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "128005": {
44
+ "content": "<|reserved_special_token_2|>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "128006": {
52
+ "content": "<|start_header_id|>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "128007": {
60
+ "content": "<|end_header_id|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "128008": {
68
+ "content": "<|eom_id|>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "128009": {
76
+ "content": "<|eot_id|>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "128010": {
84
+ "content": "<|python_tag|>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "128011": {
92
+ "content": "<|reserved_special_token_3|>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "128012": {
100
+ "content": "<|reserved_special_token_4|>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "128013": {
108
+ "content": "<|reserved_special_token_5|>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "128014": {
116
+ "content": "<|reserved_special_token_6|>",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ },
123
+ "128015": {
124
+ "content": "<|reserved_special_token_7|>",
125
+ "lstrip": false,
126
+ "normalized": false,
127
+ "rstrip": false,
128
+ "single_word": false,
129
+ "special": true
130
+ },
131
+ "128016": {
132
+ "content": "<|reserved_special_token_8|>",
133
+ "lstrip": false,
134
+ "normalized": false,
135
+ "rstrip": false,
136
+ "single_word": false,
137
+ "special": true
138
+ },
139
+ "128017": {
140
+ "content": "<|reserved_special_token_9|>",
141
+ "lstrip": false,
142
+ "normalized": false,
143
+ "rstrip": false,
144
+ "single_word": false,
145
+ "special": true
146
+ },
147
+ "128018": {
148
+ "content": "<|reserved_special_token_10|>",
149
+ "lstrip": false,
150
+ "normalized": false,
151
+ "rstrip": false,
152
+ "single_word": false,
153
+ "special": true
154
+ },
155
+ "128019": {
156
+ "content": "<|reserved_special_token_11|>",
157
+ "lstrip": false,
158
+ "normalized": false,
159
+ "rstrip": false,
160
+ "single_word": false,
161
+ "special": true
162
+ },
163
+ "128020": {
164
+ "content": "<|reserved_special_token_12|>",
165
+ "lstrip": false,
166
+ "normalized": false,
167
+ "rstrip": false,
168
+ "single_word": false,
169
+ "special": true
170
+ },
171
+ "128021": {
172
+ "content": "<|reserved_special_token_13|>",
173
+ "lstrip": false,
174
+ "normalized": false,
175
+ "rstrip": false,
176
+ "single_word": false,
177
+ "special": true
178
+ },
179
+ "128022": {
180
+ "content": "<|reserved_special_token_14|>",
181
+ "lstrip": false,
182
+ "normalized": false,
183
+ "rstrip": false,
184
+ "single_word": false,
185
+ "special": true
186
+ },
187
+ "128023": {
188
+ "content": "<|reserved_special_token_15|>",
189
+ "lstrip": false,
190
+ "normalized": false,
191
+ "rstrip": false,
192
+ "single_word": false,
193
+ "special": true
194
+ },
195
+ "128024": {
196
+ "content": "<|reserved_special_token_16|>",
197
+ "lstrip": false,
198
+ "normalized": false,
199
+ "rstrip": false,
200
+ "single_word": false,
201
+ "special": true
202
+ },
203
+ "128025": {
204
+ "content": "<|reserved_special_token_17|>",
205
+ "lstrip": false,
206
+ "normalized": false,
207
+ "rstrip": false,
208
+ "single_word": false,
209
+ "special": true
210
+ },
211
+ "128026": {
212
+ "content": "<|reserved_special_token_18|>",
213
+ "lstrip": false,
214
+ "normalized": false,
215
+ "rstrip": false,
216
+ "single_word": false,
217
+ "special": true
218
+ },
219
+ "128027": {
220
+ "content": "<|reserved_special_token_19|>",
221
+ "lstrip": false,
222
+ "normalized": false,
223
+ "rstrip": false,
224
+ "single_word": false,
225
+ "special": true
226
+ },
227
+ "128028": {
228
+ "content": "<|reserved_special_token_20|>",
229
+ "lstrip": false,
230
+ "normalized": false,
231
+ "rstrip": false,
232
+ "single_word": false,
233
+ "special": true
234
+ },
235
+ "128029": {
236
+ "content": "<|reserved_special_token_21|>",
237
+ "lstrip": false,
238
+ "normalized": false,
239
+ "rstrip": false,
240
+ "single_word": false,
241
+ "special": true
242
+ },
243
+ "128030": {
244
+ "content": "<|reserved_special_token_22|>",
245
+ "lstrip": false,
246
+ "normalized": false,
247
+ "rstrip": false,
248
+ "single_word": false,
249
+ "special": true
250
+ },
251
+ "128031": {
252
+ "content": "<|reserved_special_token_23|>",
253
+ "lstrip": false,
254
+ "normalized": false,
255
+ "rstrip": false,
256
+ "single_word": false,
257
+ "special": true
258
+ },
259
+ "128032": {
260
+ "content": "<|reserved_special_token_24|>",
261
+ "lstrip": false,
262
+ "normalized": false,
263
+ "rstrip": false,
264
+ "single_word": false,
265
+ "special": true
266
+ },
267
+ "128033": {
268
+ "content": "<|reserved_special_token_25|>",
269
+ "lstrip": false,
270
+ "normalized": false,
271
+ "rstrip": false,
272
+ "single_word": false,
273
+ "special": true
274
+ },
275
+ "128034": {
276
+ "content": "<|reserved_special_token_26|>",
277
+ "lstrip": false,
278
+ "normalized": false,
279
+ "rstrip": false,
280
+ "single_word": false,
281
+ "special": true
282
+ },
283
+ "128035": {
284
+ "content": "<|reserved_special_token_27|>",
285
+ "lstrip": false,
286
+ "normalized": false,
287
+ "rstrip": false,
288
+ "single_word": false,
289
+ "special": true
290
+ },
291
+ "128036": {
292
+ "content": "<|reserved_special_token_28|>",
293
+ "lstrip": false,
294
+ "normalized": false,
295
+ "rstrip": false,
296
+ "single_word": false,
297
+ "special": true
298
+ },
299
+ "128037": {
300
+ "content": "<|reserved_special_token_29|>",
301
+ "lstrip": false,
302
+ "normalized": false,
303
+ "rstrip": false,
304
+ "single_word": false,
305
+ "special": true
306
+ },
307
+ "128038": {
308
+ "content": "<|reserved_special_token_30|>",
309
+ "lstrip": false,
310
+ "normalized": false,
311
+ "rstrip": false,
312
+ "single_word": false,
313
+ "special": true
314
+ },
315
+ "128039": {
316
+ "content": "<|reserved_special_token_31|>",
317
+ "lstrip": false,
318
+ "normalized": false,
319
+ "rstrip": false,
320
+ "single_word": false,
321
+ "special": true
322
+ },
323
+ "128040": {
324
+ "content": "<|reserved_special_token_32|>",
325
+ "lstrip": false,
326
+ "normalized": false,
327
+ "rstrip": false,
328
+ "single_word": false,
329
+ "special": true
330
+ },
331
+ "128041": {
332
+ "content": "<|reserved_special_token_33|>",
333
+ "lstrip": false,
334
+ "normalized": false,
335
+ "rstrip": false,
336
+ "single_word": false,
337
+ "special": true
338
+ },
339
+ "128042": {
340
+ "content": "<|reserved_special_token_34|>",
341
+ "lstrip": false,
342
+ "normalized": false,
343
+ "rstrip": false,
344
+ "single_word": false,
345
+ "special": true
346
+ },
347
+ "128043": {
348
+ "content": "<|reserved_special_token_35|>",
349
+ "lstrip": false,
350
+ "normalized": false,
351
+ "rstrip": false,
352
+ "single_word": false,
353
+ "special": true
354
+ },
355
+ "128044": {
356
+ "content": "<|reserved_special_token_36|>",
357
+ "lstrip": false,
358
+ "normalized": false,
359
+ "rstrip": false,
360
+ "single_word": false,
361
+ "special": true
362
+ },
363
+ "128045": {
364
+ "content": "<|reserved_special_token_37|>",
365
+ "lstrip": false,
366
+ "normalized": false,
367
+ "rstrip": false,
368
+ "single_word": false,
369
+ "special": true
370
+ },
371
+ "128046": {
372
+ "content": "<|reserved_special_token_38|>",
373
+ "lstrip": false,
374
+ "normalized": false,
375
+ "rstrip": false,
376
+ "single_word": false,
377
+ "special": true
378
+ },
379
+ "128047": {
380
+ "content": "<|reserved_special_token_39|>",
381
+ "lstrip": false,
382
+ "normalized": false,
383
+ "rstrip": false,
384
+ "single_word": false,
385
+ "special": true
386
+ },
387
+ "128048": {
388
+ "content": "<|reserved_special_token_40|>",
389
+ "lstrip": false,
390
+ "normalized": false,
391
+ "rstrip": false,
392
+ "single_word": false,
393
+ "special": true
394
+ },
395
+ "128049": {
396
+ "content": "<|reserved_special_token_41|>",
397
+ "lstrip": false,
398
+ "normalized": false,
399
+ "rstrip": false,
400
+ "single_word": false,
401
+ "special": true
402
+ },
403
+ "128050": {
404
+ "content": "<|reserved_special_token_42|>",
405
+ "lstrip": false,
406
+ "normalized": false,
407
+ "rstrip": false,
408
+ "single_word": false,
409
+ "special": true
410
+ },
411
+ "128051": {
412
+ "content": "<|reserved_special_token_43|>",
413
+ "lstrip": false,
414
+ "normalized": false,
415
+ "rstrip": false,
416
+ "single_word": false,
417
+ "special": true
418
+ },
419
+ "128052": {
420
+ "content": "<|reserved_special_token_44|>",
421
+ "lstrip": false,
422
+ "normalized": false,
423
+ "rstrip": false,
424
+ "single_word": false,
425
+ "special": true
426
+ },
427
+ "128053": {
428
+ "content": "<|reserved_special_token_45|>",
429
+ "lstrip": false,
430
+ "normalized": false,
431
+ "rstrip": false,
432
+ "single_word": false,
433
+ "special": true
434
+ },
435
+ "128054": {
436
+ "content": "<|reserved_special_token_46|>",
437
+ "lstrip": false,
438
+ "normalized": false,
439
+ "rstrip": false,
440
+ "single_word": false,
441
+ "special": true
442
+ },
443
+ "128055": {
444
+ "content": "<|reserved_special_token_47|>",
445
+ "lstrip": false,
446
+ "normalized": false,
447
+ "rstrip": false,
448
+ "single_word": false,
449
+ "special": true
450
+ },
451
+ "128056": {
452
+ "content": "<|reserved_special_token_48|>",
453
+ "lstrip": false,
454
+ "normalized": false,
455
+ "rstrip": false,
456
+ "single_word": false,
457
+ "special": true
458
+ },
459
+ "128057": {
460
+ "content": "<|reserved_special_token_49|>",
461
+ "lstrip": false,
462
+ "normalized": false,
463
+ "rstrip": false,
464
+ "single_word": false,
465
+ "special": true
466
+ },
467
+ "128058": {
468
+ "content": "<|reserved_special_token_50|>",
469
+ "lstrip": false,
470
+ "normalized": false,
471
+ "rstrip": false,
472
+ "single_word": false,
473
+ "special": true
474
+ },
475
+ "128059": {
476
+ "content": "<|reserved_special_token_51|>",
477
+ "lstrip": false,
478
+ "normalized": false,
479
+ "rstrip": false,
480
+ "single_word": false,
481
+ "special": true
482
+ },
483
+ "128060": {
484
+ "content": "<|reserved_special_token_52|>",
485
+ "lstrip": false,
486
+ "normalized": false,
487
+ "rstrip": false,
488
+ "single_word": false,
489
+ "special": true
490
+ },
491
+ "128061": {
492
+ "content": "<|reserved_special_token_53|>",
493
+ "lstrip": false,
494
+ "normalized": false,
495
+ "rstrip": false,
496
+ "single_word": false,
497
+ "special": true
498
+ },
499
+ "128062": {
500
+ "content": "<|reserved_special_token_54|>",
501
+ "lstrip": false,
502
+ "normalized": false,
503
+ "rstrip": false,
504
+ "single_word": false,
505
+ "special": true
506
+ },
507
+ "128063": {
508
+ "content": "<|reserved_special_token_55|>",
509
+ "lstrip": false,
510
+ "normalized": false,
511
+ "rstrip": false,
512
+ "single_word": false,
513
+ "special": true
514
+ },
515
+ "128064": {
516
+ "content": "<|reserved_special_token_56|>",
517
+ "lstrip": false,
518
+ "normalized": false,
519
+ "rstrip": false,
520
+ "single_word": false,
521
+ "special": true
522
+ },
523
+ "128065": {
524
+ "content": "<|reserved_special_token_57|>",
525
+ "lstrip": false,
526
+ "normalized": false,
527
+ "rstrip": false,
528
+ "single_word": false,
529
+ "special": true
530
+ },
531
+ "128066": {
532
+ "content": "<|reserved_special_token_58|>",
533
+ "lstrip": false,
534
+ "normalized": false,
535
+ "rstrip": false,
536
+ "single_word": false,
537
+ "special": true
538
+ },
539
+ "128067": {
540
+ "content": "<|reserved_special_token_59|>",
541
+ "lstrip": false,
542
+ "normalized": false,
543
+ "rstrip": false,
544
+ "single_word": false,
545
+ "special": true
546
+ },
547
+ "128068": {
548
+ "content": "<|reserved_special_token_60|>",
549
+ "lstrip": false,
550
+ "normalized": false,
551
+ "rstrip": false,
552
+ "single_word": false,
553
+ "special": true
554
+ },
555
+ "128069": {
556
+ "content": "<|reserved_special_token_61|>",
557
+ "lstrip": false,
558
+ "normalized": false,
559
+ "rstrip": false,
560
+ "single_word": false,
561
+ "special": true
562
+ },
563
+ "128070": {
564
+ "content": "<|reserved_special_token_62|>",
565
+ "lstrip": false,
566
+ "normalized": false,
567
+ "rstrip": false,
568
+ "single_word": false,
569
+ "special": true
570
+ },
571
+ "128071": {
572
+ "content": "<|reserved_special_token_63|>",
573
+ "lstrip": false,
574
+ "normalized": false,
575
+ "rstrip": false,
576
+ "single_word": false,
577
+ "special": true
578
+ },
579
+ "128072": {
580
+ "content": "<|reserved_special_token_64|>",
581
+ "lstrip": false,
582
+ "normalized": false,
583
+ "rstrip": false,
584
+ "single_word": false,
585
+ "special": true
586
+ },
587
+ "128073": {
588
+ "content": "<|reserved_special_token_65|>",
589
+ "lstrip": false,
590
+ "normalized": false,
591
+ "rstrip": false,
592
+ "single_word": false,
593
+ "special": true
594
+ },
595
+ "128074": {
596
+ "content": "<|reserved_special_token_66|>",
597
+ "lstrip": false,
598
+ "normalized": false,
599
+ "rstrip": false,
600
+ "single_word": false,
601
+ "special": true
602
+ },
603
+ "128075": {
604
+ "content": "<|reserved_special_token_67|>",
605
+ "lstrip": false,
606
+ "normalized": false,
607
+ "rstrip": false,
608
+ "single_word": false,
609
+ "special": true
610
+ },
611
+ "128076": {
612
+ "content": "<|reserved_special_token_68|>",
613
+ "lstrip": false,
614
+ "normalized": false,
615
+ "rstrip": false,
616
+ "single_word": false,
617
+ "special": true
618
+ },
619
+ "128077": {
620
+ "content": "<|reserved_special_token_69|>",
621
+ "lstrip": false,
622
+ "normalized": false,
623
+ "rstrip": false,
624
+ "single_word": false,
625
+ "special": true
626
+ },
627
+ "128078": {
628
+ "content": "<|reserved_special_token_70|>",
629
+ "lstrip": false,
630
+ "normalized": false,
631
+ "rstrip": false,
632
+ "single_word": false,
633
+ "special": true
634
+ },
635
+ "128079": {
636
+ "content": "<|reserved_special_token_71|>",
637
+ "lstrip": false,
638
+ "normalized": false,
639
+ "rstrip": false,
640
+ "single_word": false,
641
+ "special": true
642
+ },
643
+ "128080": {
644
+ "content": "<|reserved_special_token_72|>",
645
+ "lstrip": false,
646
+ "normalized": false,
647
+ "rstrip": false,
648
+ "single_word": false,
649
+ "special": true
650
+ },
651
+ "128081": {
652
+ "content": "<|reserved_special_token_73|>",
653
+ "lstrip": false,
654
+ "normalized": false,
655
+ "rstrip": false,
656
+ "single_word": false,
657
+ "special": true
658
+ },
659
+ "128082": {
660
+ "content": "<|reserved_special_token_74|>",
661
+ "lstrip": false,
662
+ "normalized": false,
663
+ "rstrip": false,
664
+ "single_word": false,
665
+ "special": true
666
+ },
667
+ "128083": {
668
+ "content": "<|reserved_special_token_75|>",
669
+ "lstrip": false,
670
+ "normalized": false,
671
+ "rstrip": false,
672
+ "single_word": false,
673
+ "special": true
674
+ },
675
+ "128084": {
676
+ "content": "<|reserved_special_token_76|>",
677
+ "lstrip": false,
678
+ "normalized": false,
679
+ "rstrip": false,
680
+ "single_word": false,
681
+ "special": true
682
+ },
683
+ "128085": {
684
+ "content": "<|reserved_special_token_77|>",
685
+ "lstrip": false,
686
+ "normalized": false,
687
+ "rstrip": false,
688
+ "single_word": false,
689
+ "special": true
690
+ },
691
+ "128086": {
692
+ "content": "<|reserved_special_token_78|>",
693
+ "lstrip": false,
694
+ "normalized": false,
695
+ "rstrip": false,
696
+ "single_word": false,
697
+ "special": true
698
+ },
699
+ "128087": {
700
+ "content": "<|reserved_special_token_79|>",
701
+ "lstrip": false,
702
+ "normalized": false,
703
+ "rstrip": false,
704
+ "single_word": false,
705
+ "special": true
706
+ },
707
+ "128088": {
708
+ "content": "<|reserved_special_token_80|>",
709
+ "lstrip": false,
710
+ "normalized": false,
711
+ "rstrip": false,
712
+ "single_word": false,
713
+ "special": true
714
+ },
715
+ "128089": {
716
+ "content": "<|reserved_special_token_81|>",
717
+ "lstrip": false,
718
+ "normalized": false,
719
+ "rstrip": false,
720
+ "single_word": false,
721
+ "special": true
722
+ },
723
+ "128090": {
724
+ "content": "<|reserved_special_token_82|>",
725
+ "lstrip": false,
726
+ "normalized": false,
727
+ "rstrip": false,
728
+ "single_word": false,
729
+ "special": true
730
+ },
731
+ "128091": {
732
+ "content": "<|reserved_special_token_83|>",
733
+ "lstrip": false,
734
+ "normalized": false,
735
+ "rstrip": false,
736
+ "single_word": false,
737
+ "special": true
738
+ },
739
+ "128092": {
740
+ "content": "<|reserved_special_token_84|>",
741
+ "lstrip": false,
742
+ "normalized": false,
743
+ "rstrip": false,
744
+ "single_word": false,
745
+ "special": true
746
+ },
747
+ "128093": {
748
+ "content": "<|reserved_special_token_85|>",
749
+ "lstrip": false,
750
+ "normalized": false,
751
+ "rstrip": false,
752
+ "single_word": false,
753
+ "special": true
754
+ },
755
+ "128094": {
756
+ "content": "<|reserved_special_token_86|>",
757
+ "lstrip": false,
758
+ "normalized": false,
759
+ "rstrip": false,
760
+ "single_word": false,
761
+ "special": true
762
+ },
763
+ "128095": {
764
+ "content": "<|reserved_special_token_87|>",
765
+ "lstrip": false,
766
+ "normalized": false,
767
+ "rstrip": false,
768
+ "single_word": false,
769
+ "special": true
770
+ },
771
+ "128096": {
772
+ "content": "<|reserved_special_token_88|>",
773
+ "lstrip": false,
774
+ "normalized": false,
775
+ "rstrip": false,
776
+ "single_word": false,
777
+ "special": true
778
+ },
779
+ "128097": {
780
+ "content": "<|reserved_special_token_89|>",
781
+ "lstrip": false,
782
+ "normalized": false,
783
+ "rstrip": false,
784
+ "single_word": false,
785
+ "special": true
786
+ },
787
+ "128098": {
788
+ "content": "<|reserved_special_token_90|>",
789
+ "lstrip": false,
790
+ "normalized": false,
791
+ "rstrip": false,
792
+ "single_word": false,
793
+ "special": true
794
+ },
795
+ "128099": {
796
+ "content": "<|reserved_special_token_91|>",
797
+ "lstrip": false,
798
+ "normalized": false,
799
+ "rstrip": false,
800
+ "single_word": false,
801
+ "special": true
802
+ },
803
+ "128100": {
804
+ "content": "<|reserved_special_token_92|>",
805
+ "lstrip": false,
806
+ "normalized": false,
807
+ "rstrip": false,
808
+ "single_word": false,
809
+ "special": true
810
+ },
811
+ "128101": {
812
+ "content": "<|reserved_special_token_93|>",
813
+ "lstrip": false,
814
+ "normalized": false,
815
+ "rstrip": false,
816
+ "single_word": false,
817
+ "special": true
818
+ },
819
+ "128102": {
820
+ "content": "<|reserved_special_token_94|>",
821
+ "lstrip": false,
822
+ "normalized": false,
823
+ "rstrip": false,
824
+ "single_word": false,
825
+ "special": true
826
+ },
827
+ "128103": {
828
+ "content": "<|reserved_special_token_95|>",
829
+ "lstrip": false,
830
+ "normalized": false,
831
+ "rstrip": false,
832
+ "single_word": false,
833
+ "special": true
834
+ },
835
+ "128104": {
836
+ "content": "<|reserved_special_token_96|>",
837
+ "lstrip": false,
838
+ "normalized": false,
839
+ "rstrip": false,
840
+ "single_word": false,
841
+ "special": true
842
+ },
843
+ "128105": {
844
+ "content": "<|reserved_special_token_97|>",
845
+ "lstrip": false,
846
+ "normalized": false,
847
+ "rstrip": false,
848
+ "single_word": false,
849
+ "special": true
850
+ },
851
+ "128106": {
852
+ "content": "<|reserved_special_token_98|>",
853
+ "lstrip": false,
854
+ "normalized": false,
855
+ "rstrip": false,
856
+ "single_word": false,
857
+ "special": true
858
+ },
859
+ "128107": {
860
+ "content": "<|reserved_special_token_99|>",
861
+ "lstrip": false,
862
+ "normalized": false,
863
+ "rstrip": false,
864
+ "single_word": false,
865
+ "special": true
866
+ },
867
+ "128108": {
868
+ "content": "<|reserved_special_token_100|>",
869
+ "lstrip": false,
870
+ "normalized": false,
871
+ "rstrip": false,
872
+ "single_word": false,
873
+ "special": true
874
+ },
875
+ "128109": {
876
+ "content": "<|reserved_special_token_101|>",
877
+ "lstrip": false,
878
+ "normalized": false,
879
+ "rstrip": false,
880
+ "single_word": false,
881
+ "special": true
882
+ },
883
+ "128110": {
884
+ "content": "<|reserved_special_token_102|>",
885
+ "lstrip": false,
886
+ "normalized": false,
887
+ "rstrip": false,
888
+ "single_word": false,
889
+ "special": true
890
+ },
891
+ "128111": {
892
+ "content": "<|reserved_special_token_103|>",
893
+ "lstrip": false,
894
+ "normalized": false,
895
+ "rstrip": false,
896
+ "single_word": false,
897
+ "special": true
898
+ },
899
+ "128112": {
900
+ "content": "<|reserved_special_token_104|>",
901
+ "lstrip": false,
902
+ "normalized": false,
903
+ "rstrip": false,
904
+ "single_word": false,
905
+ "special": true
906
+ },
907
+ "128113": {
908
+ "content": "<|reserved_special_token_105|>",
909
+ "lstrip": false,
910
+ "normalized": false,
911
+ "rstrip": false,
912
+ "single_word": false,
913
+ "special": true
914
+ },
915
+ "128114": {
916
+ "content": "<|reserved_special_token_106|>",
917
+ "lstrip": false,
918
+ "normalized": false,
919
+ "rstrip": false,
920
+ "single_word": false,
921
+ "special": true
922
+ },
923
+ "128115": {
924
+ "content": "<|reserved_special_token_107|>",
925
+ "lstrip": false,
926
+ "normalized": false,
927
+ "rstrip": false,
928
+ "single_word": false,
929
+ "special": true
930
+ },
931
+ "128116": {
932
+ "content": "<|reserved_special_token_108|>",
933
+ "lstrip": false,
934
+ "normalized": false,
935
+ "rstrip": false,
936
+ "single_word": false,
937
+ "special": true
938
+ },
939
+ "128117": {
940
+ "content": "<|reserved_special_token_109|>",
941
+ "lstrip": false,
942
+ "normalized": false,
943
+ "rstrip": false,
944
+ "single_word": false,
945
+ "special": true
946
+ },
947
+ "128118": {
948
+ "content": "<|reserved_special_token_110|>",
949
+ "lstrip": false,
950
+ "normalized": false,
951
+ "rstrip": false,
952
+ "single_word": false,
953
+ "special": true
954
+ },
955
+ "128119": {
956
+ "content": "<|reserved_special_token_111|>",
957
+ "lstrip": false,
958
+ "normalized": false,
959
+ "rstrip": false,
960
+ "single_word": false,
961
+ "special": true
962
+ },
963
+ "128120": {
964
+ "content": "<|reserved_special_token_112|>",
965
+ "lstrip": false,
966
+ "normalized": false,
967
+ "rstrip": false,
968
+ "single_word": false,
969
+ "special": true
970
+ },
971
+ "128121": {
972
+ "content": "<|reserved_special_token_113|>",
973
+ "lstrip": false,
974
+ "normalized": false,
975
+ "rstrip": false,
976
+ "single_word": false,
977
+ "special": true
978
+ },
979
+ "128122": {
980
+ "content": "<|reserved_special_token_114|>",
981
+ "lstrip": false,
982
+ "normalized": false,
983
+ "rstrip": false,
984
+ "single_word": false,
985
+ "special": true
986
+ },
987
+ "128123": {
988
+ "content": "<|reserved_special_token_115|>",
989
+ "lstrip": false,
990
+ "normalized": false,
991
+ "rstrip": false,
992
+ "single_word": false,
993
+ "special": true
994
+ },
995
+ "128124": {
996
+ "content": "<|reserved_special_token_116|>",
997
+ "lstrip": false,
998
+ "normalized": false,
999
+ "rstrip": false,
1000
+ "single_word": false,
1001
+ "special": true
1002
+ },
1003
+ "128125": {
1004
+ "content": "<|reserved_special_token_117|>",
1005
+ "lstrip": false,
1006
+ "normalized": false,
1007
+ "rstrip": false,
1008
+ "single_word": false,
1009
+ "special": true
1010
+ },
1011
+ "128126": {
1012
+ "content": "<|reserved_special_token_118|>",
1013
+ "lstrip": false,
1014
+ "normalized": false,
1015
+ "rstrip": false,
1016
+ "single_word": false,
1017
+ "special": true
1018
+ },
1019
+ "128127": {
1020
+ "content": "<|reserved_special_token_119|>",
1021
+ "lstrip": false,
1022
+ "normalized": false,
1023
+ "rstrip": false,
1024
+ "single_word": false,
1025
+ "special": true
1026
+ },
1027
+ "128128": {
1028
+ "content": "<|reserved_special_token_120|>",
1029
+ "lstrip": false,
1030
+ "normalized": false,
1031
+ "rstrip": false,
1032
+ "single_word": false,
1033
+ "special": true
1034
+ },
1035
+ "128129": {
1036
+ "content": "<|reserved_special_token_121|>",
1037
+ "lstrip": false,
1038
+ "normalized": false,
1039
+ "rstrip": false,
1040
+ "single_word": false,
1041
+ "special": true
1042
+ },
1043
+ "128130": {
1044
+ "content": "<|reserved_special_token_122|>",
1045
+ "lstrip": false,
1046
+ "normalized": false,
1047
+ "rstrip": false,
1048
+ "single_word": false,
1049
+ "special": true
1050
+ },
1051
+ "128131": {
1052
+ "content": "<|reserved_special_token_123|>",
1053
+ "lstrip": false,
1054
+ "normalized": false,
1055
+ "rstrip": false,
1056
+ "single_word": false,
1057
+ "special": true
1058
+ },
1059
+ "128132": {
1060
+ "content": "<|reserved_special_token_124|>",
1061
+ "lstrip": false,
1062
+ "normalized": false,
1063
+ "rstrip": false,
1064
+ "single_word": false,
1065
+ "special": true
1066
+ },
1067
+ "128133": {
1068
+ "content": "<|reserved_special_token_125|>",
1069
+ "lstrip": false,
1070
+ "normalized": false,
1071
+ "rstrip": false,
1072
+ "single_word": false,
1073
+ "special": true
1074
+ },
1075
+ "128134": {
1076
+ "content": "<|reserved_special_token_126|>",
1077
+ "lstrip": false,
1078
+ "normalized": false,
1079
+ "rstrip": false,
1080
+ "single_word": false,
1081
+ "special": true
1082
+ },
1083
+ "128135": {
1084
+ "content": "<|reserved_special_token_127|>",
1085
+ "lstrip": false,
1086
+ "normalized": false,
1087
+ "rstrip": false,
1088
+ "single_word": false,
1089
+ "special": true
1090
+ },
1091
+ "128136": {
1092
+ "content": "<|reserved_special_token_128|>",
1093
+ "lstrip": false,
1094
+ "normalized": false,
1095
+ "rstrip": false,
1096
+ "single_word": false,
1097
+ "special": true
1098
+ },
1099
+ "128137": {
1100
+ "content": "<|reserved_special_token_129|>",
1101
+ "lstrip": false,
1102
+ "normalized": false,
1103
+ "rstrip": false,
1104
+ "single_word": false,
1105
+ "special": true
1106
+ },
1107
+ "128138": {
1108
+ "content": "<|reserved_special_token_130|>",
1109
+ "lstrip": false,
1110
+ "normalized": false,
1111
+ "rstrip": false,
1112
+ "single_word": false,
1113
+ "special": true
1114
+ },
1115
+ "128139": {
1116
+ "content": "<|reserved_special_token_131|>",
1117
+ "lstrip": false,
1118
+ "normalized": false,
1119
+ "rstrip": false,
1120
+ "single_word": false,
1121
+ "special": true
1122
+ },
1123
+ "128140": {
1124
+ "content": "<|reserved_special_token_132|>",
1125
+ "lstrip": false,
1126
+ "normalized": false,
1127
+ "rstrip": false,
1128
+ "single_word": false,
1129
+ "special": true
1130
+ },
1131
+ "128141": {
1132
+ "content": "<|reserved_special_token_133|>",
1133
+ "lstrip": false,
1134
+ "normalized": false,
1135
+ "rstrip": false,
1136
+ "single_word": false,
1137
+ "special": true
1138
+ },
1139
+ "128142": {
1140
+ "content": "<|reserved_special_token_134|>",
1141
+ "lstrip": false,
1142
+ "normalized": false,
1143
+ "rstrip": false,
1144
+ "single_word": false,
1145
+ "special": true
1146
+ },
1147
+ "128143": {
1148
+ "content": "<|reserved_special_token_135|>",
1149
+ "lstrip": false,
1150
+ "normalized": false,
1151
+ "rstrip": false,
1152
+ "single_word": false,
1153
+ "special": true
1154
+ },
1155
+ "128144": {
1156
+ "content": "<|reserved_special_token_136|>",
1157
+ "lstrip": false,
1158
+ "normalized": false,
1159
+ "rstrip": false,
1160
+ "single_word": false,
1161
+ "special": true
1162
+ },
1163
+ "128145": {
1164
+ "content": "<|reserved_special_token_137|>",
1165
+ "lstrip": false,
1166
+ "normalized": false,
1167
+ "rstrip": false,
1168
+ "single_word": false,
1169
+ "special": true
1170
+ },
1171
+ "128146": {
1172
+ "content": "<|reserved_special_token_138|>",
1173
+ "lstrip": false,
1174
+ "normalized": false,
1175
+ "rstrip": false,
1176
+ "single_word": false,
1177
+ "special": true
1178
+ },
1179
+ "128147": {
1180
+ "content": "<|reserved_special_token_139|>",
1181
+ "lstrip": false,
1182
+ "normalized": false,
1183
+ "rstrip": false,
1184
+ "single_word": false,
1185
+ "special": true
1186
+ },
1187
+ "128148": {
1188
+ "content": "<|reserved_special_token_140|>",
1189
+ "lstrip": false,
1190
+ "normalized": false,
1191
+ "rstrip": false,
1192
+ "single_word": false,
1193
+ "special": true
1194
+ },
1195
+ "128149": {
1196
+ "content": "<|reserved_special_token_141|>",
1197
+ "lstrip": false,
1198
+ "normalized": false,
1199
+ "rstrip": false,
1200
+ "single_word": false,
1201
+ "special": true
1202
+ },
1203
+ "128150": {
1204
+ "content": "<|reserved_special_token_142|>",
1205
+ "lstrip": false,
1206
+ "normalized": false,
1207
+ "rstrip": false,
1208
+ "single_word": false,
1209
+ "special": true
1210
+ },
1211
+ "128151": {
1212
+ "content": "<|reserved_special_token_143|>",
1213
+ "lstrip": false,
1214
+ "normalized": false,
1215
+ "rstrip": false,
1216
+ "single_word": false,
1217
+ "special": true
1218
+ },
1219
+ "128152": {
1220
+ "content": "<|reserved_special_token_144|>",
1221
+ "lstrip": false,
1222
+ "normalized": false,
1223
+ "rstrip": false,
1224
+ "single_word": false,
1225
+ "special": true
1226
+ },
1227
+ "128153": {
1228
+ "content": "<|reserved_special_token_145|>",
1229
+ "lstrip": false,
1230
+ "normalized": false,
1231
+ "rstrip": false,
1232
+ "single_word": false,
1233
+ "special": true
1234
+ },
1235
+ "128154": {
1236
+ "content": "<|reserved_special_token_146|>",
1237
+ "lstrip": false,
1238
+ "normalized": false,
1239
+ "rstrip": false,
1240
+ "single_word": false,
1241
+ "special": true
1242
+ },
1243
+ "128155": {
1244
+ "content": "<|reserved_special_token_147|>",
1245
+ "lstrip": false,
1246
+ "normalized": false,
1247
+ "rstrip": false,
1248
+ "single_word": false,
1249
+ "special": true
1250
+ },
1251
+ "128156": {
1252
+ "content": "<|reserved_special_token_148|>",
1253
+ "lstrip": false,
1254
+ "normalized": false,
1255
+ "rstrip": false,
1256
+ "single_word": false,
1257
+ "special": true
1258
+ },
1259
+ "128157": {
1260
+ "content": "<|reserved_special_token_149|>",
1261
+ "lstrip": false,
1262
+ "normalized": false,
1263
+ "rstrip": false,
1264
+ "single_word": false,
1265
+ "special": true
1266
+ },
1267
+ "128158": {
1268
+ "content": "<|reserved_special_token_150|>",
1269
+ "lstrip": false,
1270
+ "normalized": false,
1271
+ "rstrip": false,
1272
+ "single_word": false,
1273
+ "special": true
1274
+ },
1275
+ "128159": {
1276
+ "content": "<|reserved_special_token_151|>",
1277
+ "lstrip": false,
1278
+ "normalized": false,
1279
+ "rstrip": false,
1280
+ "single_word": false,
1281
+ "special": true
1282
+ },
1283
+ "128160": {
1284
+ "content": "<|reserved_special_token_152|>",
1285
+ "lstrip": false,
1286
+ "normalized": false,
1287
+ "rstrip": false,
1288
+ "single_word": false,
1289
+ "special": true
1290
+ },
1291
+ "128161": {
1292
+ "content": "<|reserved_special_token_153|>",
1293
+ "lstrip": false,
1294
+ "normalized": false,
1295
+ "rstrip": false,
1296
+ "single_word": false,
1297
+ "special": true
1298
+ },
1299
+ "128162": {
1300
+ "content": "<|reserved_special_token_154|>",
1301
+ "lstrip": false,
1302
+ "normalized": false,
1303
+ "rstrip": false,
1304
+ "single_word": false,
1305
+ "special": true
1306
+ },
1307
+ "128163": {
1308
+ "content": "<|reserved_special_token_155|>",
1309
+ "lstrip": false,
1310
+ "normalized": false,
1311
+ "rstrip": false,
1312
+ "single_word": false,
1313
+ "special": true
1314
+ },
1315
+ "128164": {
1316
+ "content": "<|reserved_special_token_156|>",
1317
+ "lstrip": false,
1318
+ "normalized": false,
1319
+ "rstrip": false,
1320
+ "single_word": false,
1321
+ "special": true
1322
+ },
1323
+ "128165": {
1324
+ "content": "<|reserved_special_token_157|>",
1325
+ "lstrip": false,
1326
+ "normalized": false,
1327
+ "rstrip": false,
1328
+ "single_word": false,
1329
+ "special": true
1330
+ },
1331
+ "128166": {
1332
+ "content": "<|reserved_special_token_158|>",
1333
+ "lstrip": false,
1334
+ "normalized": false,
1335
+ "rstrip": false,
1336
+ "single_word": false,
1337
+ "special": true
1338
+ },
1339
+ "128167": {
1340
+ "content": "<|reserved_special_token_159|>",
1341
+ "lstrip": false,
1342
+ "normalized": false,
1343
+ "rstrip": false,
1344
+ "single_word": false,
1345
+ "special": true
1346
+ },
1347
+ "128168": {
1348
+ "content": "<|reserved_special_token_160|>",
1349
+ "lstrip": false,
1350
+ "normalized": false,
1351
+ "rstrip": false,
1352
+ "single_word": false,
1353
+ "special": true
1354
+ },
1355
+ "128169": {
1356
+ "content": "<|reserved_special_token_161|>",
1357
+ "lstrip": false,
1358
+ "normalized": false,
1359
+ "rstrip": false,
1360
+ "single_word": false,
1361
+ "special": true
1362
+ },
1363
+ "128170": {
1364
+ "content": "<|reserved_special_token_162|>",
1365
+ "lstrip": false,
1366
+ "normalized": false,
1367
+ "rstrip": false,
1368
+ "single_word": false,
1369
+ "special": true
1370
+ },
1371
+ "128171": {
1372
+ "content": "<|reserved_special_token_163|>",
1373
+ "lstrip": false,
1374
+ "normalized": false,
1375
+ "rstrip": false,
1376
+ "single_word": false,
1377
+ "special": true
1378
+ },
1379
+ "128172": {
1380
+ "content": "<|reserved_special_token_164|>",
1381
+ "lstrip": false,
1382
+ "normalized": false,
1383
+ "rstrip": false,
1384
+ "single_word": false,
1385
+ "special": true
1386
+ },
1387
+ "128173": {
1388
+ "content": "<|reserved_special_token_165|>",
1389
+ "lstrip": false,
1390
+ "normalized": false,
1391
+ "rstrip": false,
1392
+ "single_word": false,
1393
+ "special": true
1394
+ },
1395
+ "128174": {
1396
+ "content": "<|reserved_special_token_166|>",
1397
+ "lstrip": false,
1398
+ "normalized": false,
1399
+ "rstrip": false,
1400
+ "single_word": false,
1401
+ "special": true
1402
+ },
1403
+ "128175": {
1404
+ "content": "<|reserved_special_token_167|>",
1405
+ "lstrip": false,
1406
+ "normalized": false,
1407
+ "rstrip": false,
1408
+ "single_word": false,
1409
+ "special": true
1410
+ },
1411
+ "128176": {
1412
+ "content": "<|reserved_special_token_168|>",
1413
+ "lstrip": false,
1414
+ "normalized": false,
1415
+ "rstrip": false,
1416
+ "single_word": false,
1417
+ "special": true
1418
+ },
1419
+ "128177": {
1420
+ "content": "<|reserved_special_token_169|>",
1421
+ "lstrip": false,
1422
+ "normalized": false,
1423
+ "rstrip": false,
1424
+ "single_word": false,
1425
+ "special": true
1426
+ },
1427
+ "128178": {
1428
+ "content": "<|reserved_special_token_170|>",
1429
+ "lstrip": false,
1430
+ "normalized": false,
1431
+ "rstrip": false,
1432
+ "single_word": false,
1433
+ "special": true
1434
+ },
1435
+ "128179": {
1436
+ "content": "<|reserved_special_token_171|>",
1437
+ "lstrip": false,
1438
+ "normalized": false,
1439
+ "rstrip": false,
1440
+ "single_word": false,
1441
+ "special": true
1442
+ },
1443
+ "128180": {
1444
+ "content": "<|reserved_special_token_172|>",
1445
+ "lstrip": false,
1446
+ "normalized": false,
1447
+ "rstrip": false,
1448
+ "single_word": false,
1449
+ "special": true
1450
+ },
1451
+ "128181": {
1452
+ "content": "<|reserved_special_token_173|>",
1453
+ "lstrip": false,
1454
+ "normalized": false,
1455
+ "rstrip": false,
1456
+ "single_word": false,
1457
+ "special": true
1458
+ },
1459
+ "128182": {
1460
+ "content": "<|reserved_special_token_174|>",
1461
+ "lstrip": false,
1462
+ "normalized": false,
1463
+ "rstrip": false,
1464
+ "single_word": false,
1465
+ "special": true
1466
+ },
1467
+ "128183": {
1468
+ "content": "<|reserved_special_token_175|>",
1469
+ "lstrip": false,
1470
+ "normalized": false,
1471
+ "rstrip": false,
1472
+ "single_word": false,
1473
+ "special": true
1474
+ },
1475
+ "128184": {
1476
+ "content": "<|reserved_special_token_176|>",
1477
+ "lstrip": false,
1478
+ "normalized": false,
1479
+ "rstrip": false,
1480
+ "single_word": false,
1481
+ "special": true
1482
+ },
1483
+ "128185": {
1484
+ "content": "<|reserved_special_token_177|>",
1485
+ "lstrip": false,
1486
+ "normalized": false,
1487
+ "rstrip": false,
1488
+ "single_word": false,
1489
+ "special": true
1490
+ },
1491
+ "128186": {
1492
+ "content": "<|reserved_special_token_178|>",
1493
+ "lstrip": false,
1494
+ "normalized": false,
1495
+ "rstrip": false,
1496
+ "single_word": false,
1497
+ "special": true
1498
+ },
1499
+ "128187": {
1500
+ "content": "<|reserved_special_token_179|>",
1501
+ "lstrip": false,
1502
+ "normalized": false,
1503
+ "rstrip": false,
1504
+ "single_word": false,
1505
+ "special": true
1506
+ },
1507
+ "128188": {
1508
+ "content": "<|reserved_special_token_180|>",
1509
+ "lstrip": false,
1510
+ "normalized": false,
1511
+ "rstrip": false,
1512
+ "single_word": false,
1513
+ "special": true
1514
+ },
1515
+ "128189": {
1516
+ "content": "<|reserved_special_token_181|>",
1517
+ "lstrip": false,
1518
+ "normalized": false,
1519
+ "rstrip": false,
1520
+ "single_word": false,
1521
+ "special": true
1522
+ },
1523
+ "128190": {
1524
+ "content": "<|reserved_special_token_182|>",
1525
+ "lstrip": false,
1526
+ "normalized": false,
1527
+ "rstrip": false,
1528
+ "single_word": false,
1529
+ "special": true
1530
+ },
1531
+ "128191": {
1532
+ "content": "<|reserved_special_token_183|>",
1533
+ "lstrip": false,
1534
+ "normalized": false,
1535
+ "rstrip": false,
1536
+ "single_word": false,
1537
+ "special": true
1538
+ },
1539
+ "128192": {
1540
+ "content": "<|reserved_special_token_184|>",
1541
+ "lstrip": false,
1542
+ "normalized": false,
1543
+ "rstrip": false,
1544
+ "single_word": false,
1545
+ "special": true
1546
+ },
1547
+ "128193": {
1548
+ "content": "<|reserved_special_token_185|>",
1549
+ "lstrip": false,
1550
+ "normalized": false,
1551
+ "rstrip": false,
1552
+ "single_word": false,
1553
+ "special": true
1554
+ },
1555
+ "128194": {
1556
+ "content": "<|reserved_special_token_186|>",
1557
+ "lstrip": false,
1558
+ "normalized": false,
1559
+ "rstrip": false,
1560
+ "single_word": false,
1561
+ "special": true
1562
+ },
1563
+ "128195": {
1564
+ "content": "<|reserved_special_token_187|>",
1565
+ "lstrip": false,
1566
+ "normalized": false,
1567
+ "rstrip": false,
1568
+ "single_word": false,
1569
+ "special": true
1570
+ },
1571
+ "128196": {
1572
+ "content": "<|reserved_special_token_188|>",
1573
+ "lstrip": false,
1574
+ "normalized": false,
1575
+ "rstrip": false,
1576
+ "single_word": false,
1577
+ "special": true
1578
+ },
1579
+ "128197": {
1580
+ "content": "<|reserved_special_token_189|>",
1581
+ "lstrip": false,
1582
+ "normalized": false,
1583
+ "rstrip": false,
1584
+ "single_word": false,
1585
+ "special": true
1586
+ },
1587
+ "128198": {
1588
+ "content": "<|reserved_special_token_190|>",
1589
+ "lstrip": false,
1590
+ "normalized": false,
1591
+ "rstrip": false,
1592
+ "single_word": false,
1593
+ "special": true
1594
+ },
1595
+ "128199": {
1596
+ "content": "<|reserved_special_token_191|>",
1597
+ "lstrip": false,
1598
+ "normalized": false,
1599
+ "rstrip": false,
1600
+ "single_word": false,
1601
+ "special": true
1602
+ },
1603
+ "128200": {
1604
+ "content": "<|reserved_special_token_192|>",
1605
+ "lstrip": false,
1606
+ "normalized": false,
1607
+ "rstrip": false,
1608
+ "single_word": false,
1609
+ "special": true
1610
+ },
1611
+ "128201": {
1612
+ "content": "<|reserved_special_token_193|>",
1613
+ "lstrip": false,
1614
+ "normalized": false,
1615
+ "rstrip": false,
1616
+ "single_word": false,
1617
+ "special": true
1618
+ },
1619
+ "128202": {
1620
+ "content": "<|reserved_special_token_194|>",
1621
+ "lstrip": false,
1622
+ "normalized": false,
1623
+ "rstrip": false,
1624
+ "single_word": false,
1625
+ "special": true
1626
+ },
1627
+ "128203": {
1628
+ "content": "<|reserved_special_token_195|>",
1629
+ "lstrip": false,
1630
+ "normalized": false,
1631
+ "rstrip": false,
1632
+ "single_word": false,
1633
+ "special": true
1634
+ },
1635
+ "128204": {
1636
+ "content": "<|reserved_special_token_196|>",
1637
+ "lstrip": false,
1638
+ "normalized": false,
1639
+ "rstrip": false,
1640
+ "single_word": false,
1641
+ "special": true
1642
+ },
1643
+ "128205": {
1644
+ "content": "<|reserved_special_token_197|>",
1645
+ "lstrip": false,
1646
+ "normalized": false,
1647
+ "rstrip": false,
1648
+ "single_word": false,
1649
+ "special": true
1650
+ },
1651
+ "128206": {
1652
+ "content": "<|reserved_special_token_198|>",
1653
+ "lstrip": false,
1654
+ "normalized": false,
1655
+ "rstrip": false,
1656
+ "single_word": false,
1657
+ "special": true
1658
+ },
1659
+ "128207": {
1660
+ "content": "<|reserved_special_token_199|>",
1661
+ "lstrip": false,
1662
+ "normalized": false,
1663
+ "rstrip": false,
1664
+ "single_word": false,
1665
+ "special": true
1666
+ },
1667
+ "128208": {
1668
+ "content": "<|reserved_special_token_200|>",
1669
+ "lstrip": false,
1670
+ "normalized": false,
1671
+ "rstrip": false,
1672
+ "single_word": false,
1673
+ "special": true
1674
+ },
1675
+ "128209": {
1676
+ "content": "<|reserved_special_token_201|>",
1677
+ "lstrip": false,
1678
+ "normalized": false,
1679
+ "rstrip": false,
1680
+ "single_word": false,
1681
+ "special": true
1682
+ },
1683
+ "128210": {
1684
+ "content": "<|reserved_special_token_202|>",
1685
+ "lstrip": false,
1686
+ "normalized": false,
1687
+ "rstrip": false,
1688
+ "single_word": false,
1689
+ "special": true
1690
+ },
1691
+ "128211": {
1692
+ "content": "<|reserved_special_token_203|>",
1693
+ "lstrip": false,
1694
+ "normalized": false,
1695
+ "rstrip": false,
1696
+ "single_word": false,
1697
+ "special": true
1698
+ },
1699
+ "128212": {
1700
+ "content": "<|reserved_special_token_204|>",
1701
+ "lstrip": false,
1702
+ "normalized": false,
1703
+ "rstrip": false,
1704
+ "single_word": false,
1705
+ "special": true
1706
+ },
1707
+ "128213": {
1708
+ "content": "<|reserved_special_token_205|>",
1709
+ "lstrip": false,
1710
+ "normalized": false,
1711
+ "rstrip": false,
1712
+ "single_word": false,
1713
+ "special": true
1714
+ },
1715
+ "128214": {
1716
+ "content": "<|reserved_special_token_206|>",
1717
+ "lstrip": false,
1718
+ "normalized": false,
1719
+ "rstrip": false,
1720
+ "single_word": false,
1721
+ "special": true
1722
+ },
1723
+ "128215": {
1724
+ "content": "<|reserved_special_token_207|>",
1725
+ "lstrip": false,
1726
+ "normalized": false,
1727
+ "rstrip": false,
1728
+ "single_word": false,
1729
+ "special": true
1730
+ },
1731
+ "128216": {
1732
+ "content": "<|reserved_special_token_208|>",
1733
+ "lstrip": false,
1734
+ "normalized": false,
1735
+ "rstrip": false,
1736
+ "single_word": false,
1737
+ "special": true
1738
+ },
1739
+ "128217": {
1740
+ "content": "<|reserved_special_token_209|>",
1741
+ "lstrip": false,
1742
+ "normalized": false,
1743
+ "rstrip": false,
1744
+ "single_word": false,
1745
+ "special": true
1746
+ },
1747
+ "128218": {
1748
+ "content": "<|reserved_special_token_210|>",
1749
+ "lstrip": false,
1750
+ "normalized": false,
1751
+ "rstrip": false,
1752
+ "single_word": false,
1753
+ "special": true
1754
+ },
1755
+ "128219": {
1756
+ "content": "<|reserved_special_token_211|>",
1757
+ "lstrip": false,
1758
+ "normalized": false,
1759
+ "rstrip": false,
1760
+ "single_word": false,
1761
+ "special": true
1762
+ },
1763
+ "128220": {
1764
+ "content": "<|reserved_special_token_212|>",
1765
+ "lstrip": false,
1766
+ "normalized": false,
1767
+ "rstrip": false,
1768
+ "single_word": false,
1769
+ "special": true
1770
+ },
1771
+ "128221": {
1772
+ "content": "<|reserved_special_token_213|>",
1773
+ "lstrip": false,
1774
+ "normalized": false,
1775
+ "rstrip": false,
1776
+ "single_word": false,
1777
+ "special": true
1778
+ },
1779
+ "128222": {
1780
+ "content": "<|reserved_special_token_214|>",
1781
+ "lstrip": false,
1782
+ "normalized": false,
1783
+ "rstrip": false,
1784
+ "single_word": false,
1785
+ "special": true
1786
+ },
1787
+ "128223": {
1788
+ "content": "<|reserved_special_token_215|>",
1789
+ "lstrip": false,
1790
+ "normalized": false,
1791
+ "rstrip": false,
1792
+ "single_word": false,
1793
+ "special": true
1794
+ },
1795
+ "128224": {
1796
+ "content": "<|reserved_special_token_216|>",
1797
+ "lstrip": false,
1798
+ "normalized": false,
1799
+ "rstrip": false,
1800
+ "single_word": false,
1801
+ "special": true
1802
+ },
1803
+ "128225": {
1804
+ "content": "<|reserved_special_token_217|>",
1805
+ "lstrip": false,
1806
+ "normalized": false,
1807
+ "rstrip": false,
1808
+ "single_word": false,
1809
+ "special": true
1810
+ },
1811
+ "128226": {
1812
+ "content": "<|reserved_special_token_218|>",
1813
+ "lstrip": false,
1814
+ "normalized": false,
1815
+ "rstrip": false,
1816
+ "single_word": false,
1817
+ "special": true
1818
+ },
1819
+ "128227": {
1820
+ "content": "<|reserved_special_token_219|>",
1821
+ "lstrip": false,
1822
+ "normalized": false,
1823
+ "rstrip": false,
1824
+ "single_word": false,
1825
+ "special": true
1826
+ },
1827
+ "128228": {
1828
+ "content": "<|reserved_special_token_220|>",
1829
+ "lstrip": false,
1830
+ "normalized": false,
1831
+ "rstrip": false,
1832
+ "single_word": false,
1833
+ "special": true
1834
+ },
1835
+ "128229": {
1836
+ "content": "<|reserved_special_token_221|>",
1837
+ "lstrip": false,
1838
+ "normalized": false,
1839
+ "rstrip": false,
1840
+ "single_word": false,
1841
+ "special": true
1842
+ },
1843
+ "128230": {
1844
+ "content": "<|reserved_special_token_222|>",
1845
+ "lstrip": false,
1846
+ "normalized": false,
1847
+ "rstrip": false,
1848
+ "single_word": false,
1849
+ "special": true
1850
+ },
1851
+ "128231": {
1852
+ "content": "<|reserved_special_token_223|>",
1853
+ "lstrip": false,
1854
+ "normalized": false,
1855
+ "rstrip": false,
1856
+ "single_word": false,
1857
+ "special": true
1858
+ },
1859
+ "128232": {
1860
+ "content": "<|reserved_special_token_224|>",
1861
+ "lstrip": false,
1862
+ "normalized": false,
1863
+ "rstrip": false,
1864
+ "single_word": false,
1865
+ "special": true
1866
+ },
1867
+ "128233": {
1868
+ "content": "<|reserved_special_token_225|>",
1869
+ "lstrip": false,
1870
+ "normalized": false,
1871
+ "rstrip": false,
1872
+ "single_word": false,
1873
+ "special": true
1874
+ },
1875
+ "128234": {
1876
+ "content": "<|reserved_special_token_226|>",
1877
+ "lstrip": false,
1878
+ "normalized": false,
1879
+ "rstrip": false,
1880
+ "single_word": false,
1881
+ "special": true
1882
+ },
1883
+ "128235": {
1884
+ "content": "<|reserved_special_token_227|>",
1885
+ "lstrip": false,
1886
+ "normalized": false,
1887
+ "rstrip": false,
1888
+ "single_word": false,
1889
+ "special": true
1890
+ },
1891
+ "128236": {
1892
+ "content": "<|reserved_special_token_228|>",
1893
+ "lstrip": false,
1894
+ "normalized": false,
1895
+ "rstrip": false,
1896
+ "single_word": false,
1897
+ "special": true
1898
+ },
1899
+ "128237": {
1900
+ "content": "<|reserved_special_token_229|>",
1901
+ "lstrip": false,
1902
+ "normalized": false,
1903
+ "rstrip": false,
1904
+ "single_word": false,
1905
+ "special": true
1906
+ },
1907
+ "128238": {
1908
+ "content": "<|reserved_special_token_230|>",
1909
+ "lstrip": false,
1910
+ "normalized": false,
1911
+ "rstrip": false,
1912
+ "single_word": false,
1913
+ "special": true
1914
+ },
1915
+ "128239": {
1916
+ "content": "<|reserved_special_token_231|>",
1917
+ "lstrip": false,
1918
+ "normalized": false,
1919
+ "rstrip": false,
1920
+ "single_word": false,
1921
+ "special": true
1922
+ },
1923
+ "128240": {
1924
+ "content": "<|reserved_special_token_232|>",
1925
+ "lstrip": false,
1926
+ "normalized": false,
1927
+ "rstrip": false,
1928
+ "single_word": false,
1929
+ "special": true
1930
+ },
1931
+ "128241": {
1932
+ "content": "<|reserved_special_token_233|>",
1933
+ "lstrip": false,
1934
+ "normalized": false,
1935
+ "rstrip": false,
1936
+ "single_word": false,
1937
+ "special": true
1938
+ },
1939
+ "128242": {
1940
+ "content": "<|reserved_special_token_234|>",
1941
+ "lstrip": false,
1942
+ "normalized": false,
1943
+ "rstrip": false,
1944
+ "single_word": false,
1945
+ "special": true
1946
+ },
1947
+ "128243": {
1948
+ "content": "<|reserved_special_token_235|>",
1949
+ "lstrip": false,
1950
+ "normalized": false,
1951
+ "rstrip": false,
1952
+ "single_word": false,
1953
+ "special": true
1954
+ },
1955
+ "128244": {
1956
+ "content": "<|reserved_special_token_236|>",
1957
+ "lstrip": false,
1958
+ "normalized": false,
1959
+ "rstrip": false,
1960
+ "single_word": false,
1961
+ "special": true
1962
+ },
1963
+ "128245": {
1964
+ "content": "<|reserved_special_token_237|>",
1965
+ "lstrip": false,
1966
+ "normalized": false,
1967
+ "rstrip": false,
1968
+ "single_word": false,
1969
+ "special": true
1970
+ },
1971
+ "128246": {
1972
+ "content": "<|reserved_special_token_238|>",
1973
+ "lstrip": false,
1974
+ "normalized": false,
1975
+ "rstrip": false,
1976
+ "single_word": false,
1977
+ "special": true
1978
+ },
1979
+ "128247": {
1980
+ "content": "<|reserved_special_token_239|>",
1981
+ "lstrip": false,
1982
+ "normalized": false,
1983
+ "rstrip": false,
1984
+ "single_word": false,
1985
+ "special": true
1986
+ },
1987
+ "128248": {
1988
+ "content": "<|reserved_special_token_240|>",
1989
+ "lstrip": false,
1990
+ "normalized": false,
1991
+ "rstrip": false,
1992
+ "single_word": false,
1993
+ "special": true
1994
+ },
1995
+ "128249": {
1996
+ "content": "<|reserved_special_token_241|>",
1997
+ "lstrip": false,
1998
+ "normalized": false,
1999
+ "rstrip": false,
2000
+ "single_word": false,
2001
+ "special": true
2002
+ },
2003
+ "128250": {
2004
+ "content": "<|reserved_special_token_242|>",
2005
+ "lstrip": false,
2006
+ "normalized": false,
2007
+ "rstrip": false,
2008
+ "single_word": false,
2009
+ "special": true
2010
+ },
2011
+ "128251": {
2012
+ "content": "<|reserved_special_token_243|>",
2013
+ "lstrip": false,
2014
+ "normalized": false,
2015
+ "rstrip": false,
2016
+ "single_word": false,
2017
+ "special": true
2018
+ },
2019
+ "128252": {
2020
+ "content": "<|reserved_special_token_244|>",
2021
+ "lstrip": false,
2022
+ "normalized": false,
2023
+ "rstrip": false,
2024
+ "single_word": false,
2025
+ "special": true
2026
+ },
2027
+ "128253": {
2028
+ "content": "<|reserved_special_token_245|>",
2029
+ "lstrip": false,
2030
+ "normalized": false,
2031
+ "rstrip": false,
2032
+ "single_word": false,
2033
+ "special": true
2034
+ },
2035
+ "128254": {
2036
+ "content": "<|reserved_special_token_246|>",
2037
+ "lstrip": false,
2038
+ "normalized": false,
2039
+ "rstrip": false,
2040
+ "single_word": false,
2041
+ "special": true
2042
+ },
2043
+ "128255": {
2044
+ "content": "<|reserved_special_token_247|>",
2045
+ "lstrip": false,
2046
+ "normalized": false,
2047
+ "rstrip": false,
2048
+ "single_word": false,
2049
+ "special": true
2050
+ }
2051
+ },
2052
+ "bos_token": "<|begin_of_text|>",
2053
+ "chat_template": "{{- bos_token }}{%- if messages[0]['role'] == 'system' %}{%- set system_message = messages[0]['content']|trim %}{%- set messages = messages[1:] %}{%- else %}{%- set system_message = \"detailed thinking on\" %}{%- endif %}{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}{{- system_message }}{{- \"<|eot_id|>\" }}{%- for message in messages %}{%- if message['role'] == 'assistant' and '</think>' in message['content'] %}{%- set content = message['content'].split('</think>')[-1].lstrip() %}{%- else %}{%- set content = message['content'] %}{%- endif %}{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + content | trim + '<|eot_id|>' }}{%- endfor %}{%- if add_generation_prompt %}{{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}{%- endif %}",
2054
+ "clean_up_tokenization_spaces": true,
2055
+ "eos_token": "<|eot_id|>",
2056
+ "extra_special_tokens": {},
2057
+ "model_input_names": [
2058
+ "input_ids",
2059
+ "attention_mask"
2060
+ ],
2061
+ "model_max_length": 131072,
2062
+ "tokenizer_class": "PreTrainedTokenizerFast"
2063
+ }
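
A minimal usage sketch for the tokenizer configured above (the local path `./checkpoint` is a placeholder for a downloaded copy of this repository, not a file in this commit). It loads the fast tokenizer and renders the chat template defined in `tokenizer_config.json`, which defaults the system prompt to "detailed thinking on" when none is supplied:

```python
# Hedged sketch: load the tokenizer shipped in this repo and apply its chat template.
# "./checkpoint" is a placeholder path; replace it with the actual model directory.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./checkpoint")

messages = [
    {"role": "system", "content": "detailed thinking on"},
    {"role": "user", "content": "Summarize the benefits of KV-cache offloading."},
]

# Render the prompt string without tokenizing, ending with the assistant header.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # starts with <|begin_of_text|><|start_header_id|>system<|end_header_id|> ...
```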
tokenizer_name.txt ADDED
@@ -0,0 +1 @@
+ meta-llama/Llama-3.1-405B-Instruct
training_flowchart.png ADDED
Git LFS Details

  • SHA256: e2cbe095571b305bf14370bb8669900a79c0c834cc614a5bb6475f7214ea345f
  • Pointer size: 131 Bytes
  • Size of remote file: 505 kB
transformers_4_44_2__activations.py ADDED
@@ -0,0 +1,239 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from collections import OrderedDict
17
+
18
+ import torch
19
+ from packaging import version
20
+ from torch import Tensor, nn
21
+
22
+ from transformers.utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ class PytorchGELUTanh(nn.Module):
29
+ """
30
+ A fast C implementation of the tanh approximation of the GeLU activation function. See
31
+ https://arxiv.org/abs/1606.08415.
32
+
33
+ This implementation is equivalent to NewGELU and FastGELU but much faster. However, it is not an exact numerical
34
+ match due to rounding errors.
35
+ """
36
+
37
+ def __init__(self):
38
+ super().__init__()
39
+ if version.parse(torch.__version__) < version.parse("1.12.0"):
40
+ raise ImportError(
41
+ f"You are using torch=={torch.__version__}, but torch>=1.12.0 is required to use "
42
+ "PytorchGELUTanh. Please upgrade torch."
43
+ )
44
+
45
+ def forward(self, input: Tensor) -> Tensor:
46
+ return nn.functional.gelu(input, approximate="tanh")
47
+
48
+
49
+ class NewGELUActivation(nn.Module):
50
+ """
51
+ Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
52
+ the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
53
+ """
54
+
55
+ def forward(self, input: Tensor) -> Tensor:
56
+ return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
57
+
58
+
59
+ class GELUActivation(nn.Module):
60
+ """
61
+ Original Implementation of the GELU activation function in Google BERT repo when initially created. For
62
+ information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
63
+ torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional
64
+ Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
65
+ """
66
+
67
+ def __init__(self, use_gelu_python: bool = False):
68
+ super().__init__()
69
+ if use_gelu_python:
70
+ self.act = self._gelu_python
71
+ else:
72
+ self.act = nn.functional.gelu
73
+
74
+ def _gelu_python(self, input: Tensor) -> Tensor:
75
+ return input * 0.5 * (1.0 + torch.erf(input / math.sqrt(2.0)))
76
+
77
+ def forward(self, input: Tensor) -> Tensor:
78
+ return self.act(input)
79
+
80
+
81
+ class FastGELUActivation(nn.Module):
82
+ """
83
+ Applies GELU approximation that is slower than QuickGELU but more accurate. See: https://github.com/hendrycks/GELUs
84
+ """
85
+
86
+ def forward(self, input: Tensor) -> Tensor:
87
+ return 0.5 * input * (1.0 + torch.tanh(input * 0.7978845608 * (1.0 + 0.044715 * input * input)))
88
+
89
+
90
+ class QuickGELUActivation(nn.Module):
91
+ """
92
+ Applies GELU approximation that is fast but somewhat inaccurate. See: https://github.com/hendrycks/GELUs
93
+ """
94
+
95
+ def forward(self, input: Tensor) -> Tensor:
96
+ return input * torch.sigmoid(1.702 * input)
97
+
98
+
99
+ class ClippedGELUActivation(nn.Module):
100
+ """
101
+ Clip the range of possible GeLU outputs between [min, max]. This is especially useful for quantization purpose, as
102
+ it allows mapping negative values in the GeLU spectrum. For more information on this trick, please refer to
103
+ https://arxiv.org/abs/2004.09602.
104
+
105
+ Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
106
+ initially created.
107
+
108
+ For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 +
109
+ torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))). See https://arxiv.org/abs/1606.08415
110
+ """
111
+
112
+ def __init__(self, min: float, max: float):
113
+ if min > max:
114
+ raise ValueError(f"min should be < max (got min: {min}, max: {max})")
115
+
116
+ super().__init__()
117
+ self.min = min
118
+ self.max = max
119
+
120
+ def forward(self, x: Tensor) -> Tensor:
121
+ return torch.clip(gelu(x), self.min, self.max)
122
+
123
+
124
+ class AccurateGELUActivation(nn.Module):
125
+ """
126
+ Applies GELU approximation that is faster than default and more accurate than QuickGELU. See:
127
+ https://github.com/hendrycks/GELUs
128
+
129
+ Implemented along with MEGA (Moving Average Equipped Gated Attention)
130
+ """
131
+
132
+ def __init__(self):
133
+ super().__init__()
134
+ self.precomputed_constant = math.sqrt(2 / math.pi)
135
+
136
+ def forward(self, input: Tensor) -> Tensor:
137
+ return 0.5 * input * (1 + torch.tanh(self.precomputed_constant * (input + 0.044715 * torch.pow(input, 3))))
138
+
139
+
140
+ class MishActivation(nn.Module):
141
+ """
142
+ See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also
143
+ visit the official repository for the paper: https://github.com/digantamisra98/Mish
144
+ """
145
+
146
+ def __init__(self):
147
+ super().__init__()
148
+ if version.parse(torch.__version__) < version.parse("1.9.0"):
149
+ self.act = self._mish_python
150
+ else:
151
+ self.act = nn.functional.mish
152
+
153
+ def _mish_python(self, input: Tensor) -> Tensor:
154
+ return input * torch.tanh(nn.functional.softplus(input))
155
+
156
+ def forward(self, input: Tensor) -> Tensor:
157
+ return self.act(input)
158
+
159
+
160
+ class LinearActivation(nn.Module):
161
+ """
162
+ Applies the linear activation function, i.e. forwarding input directly to output.
163
+ """
164
+
165
+ def forward(self, input: Tensor) -> Tensor:
166
+ return input
167
+
168
+
169
+ class LaplaceActivation(nn.Module):
170
+ """
171
+ Applies elementwise activation based on Laplace function, introduced in MEGA as an attention activation. See
172
+ https://arxiv.org/abs/2209.10655
173
+
174
+ Inspired by squared relu, but with bounded range and gradient for better stability
175
+ """
176
+
177
+ def forward(self, input, mu=0.707107, sigma=0.282095):
178
+ input = (input - mu).div(sigma * math.sqrt(2.0))
179
+ return 0.5 * (1.0 + torch.erf(input))
180
+
181
+
182
+ class ReLUSquaredActivation(nn.Module):
183
+ """
184
+ Applies the relu^2 activation introduced in https://arxiv.org/abs/2109.08668v2
185
+ """
186
+
187
+ def forward(self, input):
188
+ relu_applied = nn.functional.relu(input)
189
+ squared = torch.square(relu_applied)
190
+ return squared
191
+
192
+
193
+ class ClassInstantier(OrderedDict):
194
+ def __getitem__(self, key):
195
+ content = super().__getitem__(key)
196
+ cls, kwargs = content if isinstance(content, tuple) else (content, {})
197
+ return cls(**kwargs)
198
+
199
+
200
+ ACT2CLS = {
201
+ "gelu": GELUActivation,
202
+ "gelu_10": (ClippedGELUActivation, {"min": -10, "max": 10}),
203
+ "gelu_fast": FastGELUActivation,
204
+ "gelu_new": NewGELUActivation,
205
+ "gelu_python": (GELUActivation, {"use_gelu_python": True}),
206
+ "gelu_pytorch_tanh": PytorchGELUTanh,
207
+ "gelu_accurate": AccurateGELUActivation,
208
+ "laplace": LaplaceActivation,
209
+ "leaky_relu": nn.LeakyReLU,
210
+ "linear": LinearActivation,
211
+ "mish": MishActivation,
212
+ "quick_gelu": QuickGELUActivation,
213
+ "relu": nn.ReLU,
214
+ "relu2": ReLUSquaredActivation,
215
+ "relu6": nn.ReLU6,
216
+ "sigmoid": nn.Sigmoid,
217
+ "silu": nn.SiLU,
218
+ "swish": nn.SiLU,
219
+ "tanh": nn.Tanh,
220
+ }
221
+ ACT2FN = ClassInstantier(ACT2CLS)
222
+
223
+
224
+ def get_activation(activation_string):
225
+ if activation_string in ACT2FN:
226
+ return ACT2FN[activation_string]
227
+ else:
228
+ raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
229
+
230
+
231
+ # For backwards compatibility with: from activations import gelu_python
232
+ gelu_python = get_activation("gelu_python")
233
+ gelu_new = get_activation("gelu_new")
234
+ gelu = get_activation("gelu")
235
+ gelu_fast = get_activation("gelu_fast")
236
+ quick_gelu = get_activation("quick_gelu")
237
+ silu = get_activation("silu")
238
+ mish = get_activation("mish")
239
+ linear_act = get_activation("linear")
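
A minimal sketch of how the activation registry above is typically used (it assumes the model directory is on `sys.path` so the vendored file imports as a plain module; the tensor shape is arbitrary):

```python
# Hedged sketch: resolve an activation function by name from the vendored registry.
import torch
from transformers_4_44_2__activations import get_activation

act = get_activation("relu2")   # returns a ReLUSquaredActivation instance via ACT2FN
x = torch.randn(4, 8)
y = act(x)                      # negatives are zeroed by ReLU, then the result is squared
print(y.shape)                  # torch.Size([4, 8])
```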
transformers_4_44_2__cache_utils.py ADDED
@@ -0,0 +1,1347 @@
1
+ import copy
2
+ import importlib.metadata
3
+ import json
4
+ import os
5
+ from dataclasses import dataclass
6
+ from typing import Any, Dict, List, Optional, Tuple, Union
7
+
8
+ import torch
9
+ from packaging import version
10
+
11
+ from transformers.configuration_utils import PretrainedConfig
12
+ from transformers.utils import is_torchdynamo_compiling, logging
13
+
14
+
15
+ logger = logging.get_logger(__name__)
16
+
17
+
18
+ class Cache(torch.nn.Module):
19
+ """
20
+ Base, abstract class for all caches. The actual data structure is specific to each subclass.
21
+ """
22
+
23
+ def __init__(self):
24
+ super().__init__()
25
+
26
+ def update(
27
+ self,
28
+ key_states: torch.Tensor,
29
+ value_states: torch.Tensor,
30
+ layer_idx: int,
31
+ cache_kwargs: Optional[Dict[str, Any]] = None,
32
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
33
+ """
34
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
35
+
36
+ Parameters:
37
+ key_states (`torch.Tensor`):
38
+ The new key states to cache.
39
+ value_states (`torch.Tensor`):
40
+ The new value states to cache.
41
+ layer_idx (`int`):
42
+ The index of the layer to cache the states for.
43
+ cache_kwargs (`Dict[str, Any]`, `optional`):
44
+ Additional arguments for the cache subclass. These are specific to each subclass and allow new types of
45
+ cache to be created.
46
+
47
+ Return:
48
+ A tuple containing the updated key and value states.
49
+ """
50
+ raise NotImplementedError("Make sure to implement `update` in a subclass.")
51
+
52
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
53
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
54
+ # TODO: deprecate this function in favor of `cache_position`
55
+ raise NotImplementedError("Make sure to implement `get_seq_length` in a subclass.")
56
+
57
+ def get_max_length(self) -> Optional[int]:
58
+ """Returns the maximum sequence length of the cached states, if there is any."""
59
+ raise NotImplementedError("Make sure to implement `get_max_length` in a subclass.")
60
+
61
+ def get_usable_length(self, new_seq_length: int, layer_idx: Optional[int] = 0) -> int:
62
+ """Given the sequence length of the new inputs, returns the usable length of the cache."""
63
+ # Cache without size limit -> all cache is usable
64
+ # Cache with size limit -> if the cache length plus the length of the new inputs is larger than the maximum cache
65
+ # length, we will need to evict part of the cache (and thus not all cache is usable)
66
+ max_length = self.get_max_length()
67
+ previous_seq_length = self.get_seq_length(layer_idx)
68
+ if max_length is not None and previous_seq_length + new_seq_length > max_length:
69
+ return max_length - new_seq_length
70
+ return previous_seq_length
71
+
72
+ def reorder_cache(self, beam_idx: torch.LongTensor):
73
+ """Reorders the cache for beam search, given the selected beam indices."""
74
+ for layer_idx in range(len(self.key_cache)):
75
+ device = self.key_cache[layer_idx].device
76
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
77
+ device = self.value_cache[layer_idx].device
78
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
79
+
80
+ @property
81
+ def seen_tokens(self):
82
+ logger.warning_once(
83
+ "The `seen_tokens` attribute is deprecated and will be removed in v4.41. Use the `cache_position` "
84
+ "model input instead."
85
+ )
86
+ if hasattr(self, "_seen_tokens"):
87
+ return self._seen_tokens
88
+ else:
89
+ return None
90
+
91
+
92
+ @dataclass
93
+ class CacheConfig:
94
+ """
95
+ Base class for cache configs
96
+ """
97
+
98
+ cache_implementation: None
99
+
100
+ @classmethod
101
+ def from_dict(cls, config_dict, **kwargs):
102
+ """
103
+ Constructs a CacheConfig instance from a dictionary of parameters.
104
+ Args:
105
+ config_dict (Dict[str, Any]): Dictionary containing configuration parameters.
106
+ **kwargs: Additional keyword arguments to override dictionary values.
107
+
108
+ Returns:
109
+ CacheConfig: Instance of CacheConfig constructed from the dictionary.
110
+ """
111
+ config = cls(**config_dict)
112
+ to_remove = []
113
+ for key, value in kwargs.items():
114
+ if hasattr(config, key):
115
+ setattr(config, key, value)
116
+ to_remove.append(key)
117
+ for key in to_remove:
118
+ kwargs.pop(key, None)
119
+ return config
120
+
121
+ # Copied from transformers.utils.quantization_config.QuantizationConfigMixin.to_json_file
122
+ def to_json_file(self, json_file_path: Union[str, os.PathLike]):
123
+ """
124
+ Save this instance to a JSON file.
125
+
126
+ Args:
127
+ json_file_path (`str` or `os.PathLike`):
128
+ Path to the JSON file in which this configuration instance's parameters will be saved.
129
+ use_diff (`bool`, *optional*, defaults to `True`):
130
+ If set to `True`, only the difference between the config instance and the default
131
+ `QuantizationConfig()` is serialized to JSON file.
132
+ """
133
+ with open(json_file_path, "w", encoding="utf-8") as writer:
134
+ config_dict = self.to_dict()
135
+ json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
136
+
137
+ writer.write(json_string)
138
+
139
+ # Copied from transformers.utils.quantization_config.QuantizationConfigMixin.to_dict
140
+ def to_dict(self) -> Dict[str, Any]:
141
+ """
142
+ Serializes this instance to a Python dictionary. Returns:
143
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
144
+ """
145
+ return copy.deepcopy(self.__dict__)
146
+
147
+ # Copied from transformers.utils.quantization_config.QuantizationConfigMixin.__iter__
148
+ def __iter__(self):
149
+ """allows `dict(obj)` for situations where obj may be a dict or QuantizationConfigMixin"""
150
+ for attr, value in copy.deepcopy(self.__dict__).items():
151
+ yield attr, value
152
+
153
+ # Copied from transformers.utils.quantization_config.QuantizationConfigMixin.__repr__
154
+ def __repr__(self):
155
+ return f"{self.__class__.__name__} {self.to_json_string()}"
156
+
157
+ def to_json_string(self):
158
+ """
159
+ Serializes this instance to a JSON formatted string.
160
+ Returns:
161
+ str: JSON formatted string representing the configuration instance.
162
+ """
163
+ return json.dumps(self.__dict__, indent=2) + "\n"
164
+
165
+ # Copied from transformers.utils.quantization_config.QuantizationConfigMixin.update
166
+ def update(self, **kwargs):
167
+ """
168
+ Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,
169
+ returning all the unused kwargs.
170
+
171
+ Args:
172
+ kwargs (`Dict[str, Any]`):
173
+ Dictionary of attributes to tentatively update this class.
174
+
175
+ Returns:
176
+ `Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
177
+ """
178
+ to_remove = []
179
+ for key, value in kwargs.items():
180
+ if hasattr(self, key):
181
+ setattr(self, key, value)
182
+ to_remove.append(key)
183
+
184
+ # Remove all the attributes that were updated, without modifying the input dict
185
+ unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
186
+ return unused_kwargs
187
+
188
+
189
+ class DynamicCache(Cache):
190
+ """
191
+ A cache that grows dynamically as more tokens are generated. This is the default for generative models.
192
+
193
+ It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is
194
+ `[batch_size, num_heads, seq_len, head_dim]`.
195
+
196
+ Example:
197
+
198
+ ```python
199
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
200
+
201
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
202
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
203
+
204
+ >>> inputs = tokenizer(text="My name is GPT2", return_tensors="pt")
205
+
206
+ >>> # Prepare a cache class and pass it to model's forward
207
+ >>> past_key_values = DynamicCache()
208
+ >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
209
+ >>> past_kv_length = outputs.past_key_values # access cache filled with key/values from generation
210
+ ```
211
+ """
212
+
213
+ def __init__(self) -> None:
214
+ super().__init__()
215
+ self.key_cache: List[torch.Tensor] = []
216
+ self.value_cache: List[torch.Tensor] = []
217
+ self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen
218
+
219
+ def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]:
220
+ """
221
+ Support for backwards-compatible `past_key_value` indexing, e.g. `past_key_value[0][0].shape[2]` to get the
222
+ sequence length.
223
+ """
224
+ if layer_idx < len(self):
225
+ return (self.key_cache[layer_idx], self.value_cache[layer_idx])
226
+ else:
227
+ raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}")
228
+
229
+ def __iter__(self):
230
+ """
231
+ Support for backwards-compatible `past_key_value` iteration, e.g. `for x in past_key_value:` to iterate over
232
+ keys and values
233
+ """
234
+ for layer_idx in range(len(self)):
235
+ yield (self.key_cache[layer_idx], self.value_cache[layer_idx])
236
+
237
+ def __len__(self):
238
+ """
239
+ Support for backwards-compatible `past_key_value` length, e.g. `len(past_key_value)`. This value corresponds
240
+ to the number of layers in the model.
241
+ """
242
+ return len(self.key_cache)
243
+
244
+ def update(
245
+ self,
246
+ key_states: torch.Tensor,
247
+ value_states: torch.Tensor,
248
+ layer_idx: int,
249
+ cache_kwargs: Optional[Dict[str, Any]] = None,
250
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
251
+ """
252
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
253
+
254
+ Parameters:
255
+ key_states (`torch.Tensor`):
256
+ The new key states to cache.
257
+ value_states (`torch.Tensor`):
258
+ The new value states to cache.
259
+ layer_idx (`int`):
260
+ The index of the layer to cache the states for.
261
+ cache_kwargs (`Dict[str, Any]`, `optional`):
262
+ Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
263
+
264
+ Return:
265
+ A tuple containing the updated key and value states.
266
+ """
267
+ # Update the number of seen tokens
268
+ if layer_idx == 0:
269
+ self._seen_tokens += key_states.shape[-2]
270
+
271
+ # Update the cache
272
+ if len(self.key_cache) <= layer_idx:
273
+ self.key_cache.append(key_states)
274
+ self.value_cache.append(value_states)
275
+ else:
276
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
277
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
278
+
279
+ return self.key_cache[layer_idx], self.value_cache[layer_idx]
280
+
281
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
282
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
283
+ # TODO: deprecate this function in favor of `cache_position`
284
+ if len(self.key_cache) <= layer_idx:
285
+ return 0
286
+ return self.key_cache[layer_idx].shape[-2]
287
+
288
+ def get_max_length(self) -> Optional[int]:
289
+ """Returns the maximum sequence length of the cached states. DynamicCache does not have a maximum length."""
290
+ return None
291
+
292
+ def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
293
+ """Converts the `DynamicCache` instance into the its equivalent in the legacy cache format. Used for
294
+ backward compatibility."""
295
+ legacy_cache = ()
296
+ for layer_idx in range(len(self)):
297
+ legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx]),)
298
+ return legacy_cache
299
+
300
+ @classmethod
301
+ def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None) -> "DynamicCache":
302
+ """Converts a cache in the legacy cache format into an equivalent `DynamicCache`. Used for
303
+ backward compatibility."""
304
+ cache = cls()
305
+ if past_key_values is not None:
306
+ for layer_idx in range(len(past_key_values)):
307
+ key_states, value_states = past_key_values[layer_idx]
308
+ cache.update(key_states, value_states, layer_idx)
309
+ return cache
310
+
311
+ def crop(self, max_length: int):
312
+ """Crop the past key values up to a new `max_length` in terms of tokens. `max_length` can also be
313
+ negative to remove `max_length` tokens. This is used in assisted decoding and contrastive search."""
314
+ # In case it is negative
315
+ if max_length < 0:
316
+ max_length = self.get_seq_length() - abs(max_length)
317
+
318
+ if self.get_seq_length() <= max_length:
319
+ return
320
+
321
+ self._seen_tokens = max_length
322
+ for idx in range(len(self.key_cache)):
323
+ self.key_cache[idx] = self.key_cache[idx][..., :max_length, :]
324
+ self.value_cache[idx] = self.value_cache[idx][..., :max_length, :]
325
+
326
+ def batch_split(self, full_batch_size: int, split_size: int) -> List["DynamicCache"]:
327
+ """Split the current instance into a list of `DynamicCache` by the batch size. This will be used by
328
+ `_split_model_inputs()` in `generation.utils`"""
329
+ out = []
330
+ for i in range(0, full_batch_size, split_size):
331
+ current_split = DynamicCache()
332
+ current_split._seen_tokens = self._seen_tokens
333
+ current_split.key_cache = [tensor[i : i + split_size] for tensor in self.key_cache]
334
+ current_split.value_cache = [tensor[i : i + split_size] for tensor in self.value_cache]
335
+ out.append(current_split)
336
+ return out
337
+
338
+ @classmethod
339
+ def from_batch_splits(cls, splits: List["DynamicCache"]) -> "DynamicCache":
340
+ """This is the opposite of the above `batch_split()` method. This will be used by `stack_model_outputs` in
341
+ `generation.utils`"""
342
+ cache = cls()
343
+ for idx in range(len(splits[0])):
344
+ layer_keys = torch.cat([current.key_cache[idx] for current in splits], dim=0)
345
+ layer_values = torch.cat([current.value_cache[idx] for current in splits], dim=0)
346
+ cache.update(layer_keys, layer_values, idx)
347
+ return cache
348
+
349
+ def batch_repeat_interleave(self, repeats: int):
350
+ """Repeat the cache `repeats` times in the batch dimension. Used in contrastive search."""
351
+ for layer_idx in range(len(self)):
352
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].repeat_interleave(repeats, dim=0)
353
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].repeat_interleave(repeats, dim=0)
354
+
355
+ def batch_select_indices(self, indices: torch.Tensor):
356
+ """Only keep the `indices` in the batch dimension of the cache. Used in contrastive search."""
357
+ for layer_idx in range(len(self)):
358
+ self.key_cache[layer_idx] = self.key_cache[layer_idx][indices, ...]
359
+ self.value_cache[layer_idx] = self.value_cache[layer_idx][indices, ...]
360
+
361
+
362
+ class OffloadedCache(DynamicCache):
363
+ """
364
+ A drop-in replacement for DynamicCache that conserves GPU memory at the expense of more CPU memory.
365
+ Useful for generating from models with very long context.
366
+
367
+ In addition to the default CUDA stream, where all forward() computations happen,
368
+ this class uses another stream, the prefetch stream, which it creates itself.
369
+ Since scheduling of operations on separate streams happens independently, this class uses
370
+ the prefetch stream to asynchronously prefetch the KV cache of layer k+1 when layer k is executing.
371
+ The movement of the layer k-1 cache to the CPU is handled by the default stream as a simple way to
372
+ ensure the eviction is scheduled after all computations on that cache are finished.
373
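+
+ Example (a minimal usage sketch; assumes a CUDA-capable GPU is available and that `OffloadedCache` is
+ exported by the installed `transformers` version):
+
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, OffloadedCache
+
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2").cuda()
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+
+ >>> inputs = tokenizer(text="My name is GPT2", return_tensors="pt").to(model.device)
+
+ >>> # Pass the cache to forward; layers that are not currently executing are kept in CPU memory
+ >>> past_key_values = OffloadedCache()
+ >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
+ ```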
+ """
374
+
375
+ def __init__(self) -> None:
376
+ if not torch.cuda.is_available():
377
+ raise RuntimeError("OffloadedCache can only be used with a GPU")
378
+ super().__init__()
379
+ self.original_device = []
380
+ self.prefetch_stream = torch.cuda.Stream()
381
+ self.beam_idx = None # used to delay beam search operations
382
+
383
+ def prefetch_layer(self, layer_idx: int):
384
+ "Starts prefetching the next layer cache"
385
+ if layer_idx < len(self):
386
+ with torch.cuda.stream(self.prefetch_stream):
387
+ # Prefetch next layer tensors to GPU
388
+ device = self.original_device[layer_idx]
389
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].to(device, non_blocking=True)
390
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].to(device, non_blocking=True)
391
+
392
+ def evict_previous_layer(self, layer_idx: int):
393
+ "Moves the previous layer cache to the CPU"
394
+ if len(self) > 2:
395
+ # We do it on the default stream so it occurs after all earlier computations on these tensors are done
396
+ prev_layer_idx = (layer_idx - 1) % len(self)
397
+ self.key_cache[prev_layer_idx] = self.key_cache[prev_layer_idx].to("cpu", non_blocking=True)
398
+ self.value_cache[prev_layer_idx] = self.value_cache[prev_layer_idx].to("cpu", non_blocking=True)
399
+
400
+ def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]:
401
+ "Gets the cache for this layer to the device. Prefetches the next and evicts the previous layer."
402
+ if layer_idx < len(self):
403
+ # Evict the previous layer if necessary
404
+ torch.cuda.current_stream().synchronize()
405
+ self.evict_previous_layer(layer_idx)
406
+ # Load current layer cache to its original device if not already there
407
+ original_device = self.original_device[layer_idx]
408
+ self.prefetch_stream.synchronize()
409
+ key_tensor = self.key_cache[layer_idx]
410
+ value_tensor = self.value_cache[layer_idx]
411
+ # Now deal with beam search ops which were delayed
412
+ if self.beam_idx is not None:
413
+ self.beam_idx = self.beam_idx.to(original_device)
414
+ key_tensor = key_tensor.index_select(0, self.beam_idx)
415
+ value_tensor = value_tensor.index_select(0, self.beam_idx)
416
+ # Prefetch the next layer
417
+ self.prefetch_layer((layer_idx + 1) % len(self))
418
+ return (key_tensor, value_tensor)
419
+ else:
420
+ raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}")
421
+
422
+ def reorder_cache(self, beam_idx: torch.LongTensor):
423
+ """Saves the beam indices and reorders the cache when the tensor is back to its device."""
424
+ # We delay this operation until the tensors are back to their original
425
+ # device because performing torch.index_select on the CPU is very slow
426
+ del self.beam_idx
427
+ self.beam_idx = beam_idx.clone()
428
+
429
+ def update(
430
+ self,
431
+ key_states: torch.Tensor,
432
+ value_states: torch.Tensor,
433
+ layer_idx: int,
434
+ cache_kwargs: Optional[Dict[str, Any]] = None,
435
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
436
+ """
437
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
438
+ Parameters:
439
+ key_states (`torch.Tensor`):
440
+ The new key states to cache.
441
+ value_states (`torch.Tensor`):
442
+ The new value states to cache.
443
+ layer_idx (`int`):
444
+ The index of the layer to cache the states for.
445
+ cache_kwargs (`Dict[str, Any]`, `optional`):
446
+ Additional arguments for the cache subclass. No additional arguments are used in `OffloadedCache`.
447
+ Return:
448
+ A tuple containing the updated key and value states.
449
+ """
450
+ # Update the number of seen tokens
451
+ if layer_idx == 0:
452
+ self._seen_tokens += key_states.shape[-2]
453
+
454
+ # Update the cache
455
+ if len(self.key_cache) <= layer_idx:
456
+ self.key_cache.append(key_states)
457
+ self.value_cache.append(value_states)
458
+ self.original_device.append(key_states.device)
459
+ self.evict_previous_layer(layer_idx)
460
+ else:
461
+ key_tensor, value_tensor = self[layer_idx]
462
+ self.key_cache[layer_idx] = torch.cat([key_tensor, key_states], dim=-2)
463
+ self.value_cache[layer_idx] = torch.cat([value_tensor, value_states], dim=-2)
464
+
465
+ return self.key_cache[layer_idx], self.value_cache[layer_idx]
466
+
467
+ # According to https://docs.python.org/3/library/exceptions.html#NotImplementedError
468
+ # if a method is not supposed to be supported in a subclass we should set it to None
469
+ from_legacy_cache = None
470
+
471
+ to_legacy_cache = None
472
+
473
+
474
+ class SinkCache(Cache):
475
+ """
476
+ A cache, as described in the [Attention Sinks paper](https://arxiv.org/abs/2309.17453), that allows the model to
477
+ generate beyond the length of its context window, without losing fluency in the conversation. As it discards past
478
+ tokens, the model will lose the ability to generate tokens that depend on the context that was discarded.
479
+
480
+ It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is
481
+ `[batch_size, num_heads, seq_len, head_dim]`.
482
+
483
+ Parameters:
484
+ window_length (`int`):
485
+ The length of the context window.
486
+ num_sink_tokens (`int`):
487
+ The number of sink tokens. See the original paper for more information.
488
+
489
+ Example:
490
+
491
+ ```python
492
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, SinkCache
493
+
494
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
495
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
496
+
497
+ >>> inputs = tokenizer(text="My name is GPT2", return_tensors="pt")
498
+
499
+ >>> # Prepare a cache class and pass it to model's forward
500
+ >>> past_key_values = SinkCache(window_length=256, num_sink_tokens=4)
501
+ >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
502
+ >>> past_kv_length = outputs.past_key_values # access cache filled with key/values from generation
503
+ ```
504
+ """
505
+
506
+ def __init__(self, window_length: int, num_sink_tokens: int) -> None:
507
+ super().__init__()
508
+ self.key_cache: List[torch.Tensor] = []
509
+ self.value_cache: List[torch.Tensor] = []
510
+ self.window_length = window_length
511
+ self.num_sink_tokens = num_sink_tokens
512
+ self.cos_sin_rerotation_cache = {}
513
+ self._cos_cache = None
514
+ self._sin_cache = None
515
+ self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen
516
+
517
+ @staticmethod
518
+ def _rotate_half(x):
519
+ x1 = x[..., : x.shape[-1] // 2]
520
+ x2 = x[..., x.shape[-1] // 2 :]
521
+ return torch.cat((-x2, x1), dim=-1)
522
+
523
+ def _apply_key_rotary_pos_emb(
524
+ self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
525
+ ) -> torch.Tensor:
526
+ rotated_key_states = (key_states * cos) + (self._rotate_half(key_states) * sin)
527
+ return rotated_key_states
528
+
529
+ def _get_rerotation_cos_sin(
530
+ self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
531
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
532
+ if key_states.shape[-2] not in self.cos_sin_rerotation_cache:
533
+ # Upcast to float32 temporarily for better accuracy
534
+ cos = cos.to(torch.float32)
535
+ sin = sin.to(torch.float32)
536
+
537
+ # Compute the cos and sin required for back- and forward-rotating to one position earlier in the sequence
538
+ original_cos = cos[self.num_sink_tokens + key_states.shape[-2] :]
539
+ shifted_cos = cos[self.num_sink_tokens : -key_states.shape[-2]]
540
+ original_sin = sin[self.num_sink_tokens + key_states.shape[-2] :]
541
+ shifted_sin = sin[self.num_sink_tokens : -key_states.shape[-2]]
542
+ rerotation_cos = original_cos * shifted_cos + original_sin * shifted_sin
543
+ rerotation_sin = -original_sin * shifted_cos + original_cos * shifted_sin
544
+
545
+ self.cos_sin_rerotation_cache[key_states.shape[-2]] = (
546
+ rerotation_cos.to(key_states.dtype).unsqueeze(0),
547
+ rerotation_sin.to(key_states.dtype).unsqueeze(0),
548
+ )
549
+ return self.cos_sin_rerotation_cache[key_states.shape[-2]]
550
+
551
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
552
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
553
+ # TODO: deprecate this function in favor of `cache_position`
554
+ # Workaround to make 'key_states.shape[-2] + past_key_value.get_seq_length(self.layer_idx)' <= window_length
555
+ if len(self.key_cache) <= layer_idx:
556
+ return 0
557
+ return self.key_cache[layer_idx].shape[-2]
558
+
559
+ def get_max_length(self) -> Optional[int]:
560
+ """Returns the maximum sequence length of the cached states."""
561
+ return self.window_length
562
+
563
+ def update(
564
+ self,
565
+ key_states: torch.Tensor,
566
+ value_states: torch.Tensor,
567
+ layer_idx: int,
568
+ cache_kwargs: Optional[Dict[str, Any]] = None,
569
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
570
+ """
571
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
572
+
573
+ Parameters:
574
+ key_states (`torch.Tensor`):
575
+ The new key states to cache.
576
+ value_states (`torch.Tensor`):
577
+ The new value states to cache.
578
+ layer_idx (`int`):
579
+ The index of the layer to cache the states for.
580
+ cache_kwargs (`Dict[str, Any]`, `optional`):
581
+ Additional arguments for the cache subclass. The following arguments can be used in `SinkCache`: `sin`,
582
+ `cos` and `partial_rotation_size`. These arguments are used with models using RoPE, to recompute the
583
+ rotation as the tokens are shifted.
584
+
585
+ Return:
586
+ A tuple containing the updated key and value states.
587
+ """
588
+ # Optional kwargs for `SinkCache` -- needed on models using RoPE. `partial_rotation_size` is used on models
589
+ # with partially rotated position embeddings, like Phi or Persimmon.
590
+ sin = cache_kwargs.get("sin")
591
+ cos = cache_kwargs.get("cos")
592
+ partial_rotation_size = cache_kwargs.get("partial_rotation_size")
593
+ using_rope = cos is not None and sin is not None
594
+
595
+ # Update the number of seen tokens
596
+ if layer_idx == 0:
597
+ self._seen_tokens += key_states.shape[-2]
598
+
599
+ # Update the sin/cos cache, which holds sin/cos values for all possible positions
600
+ if using_rope and layer_idx == 0:
601
+ # BC: some models still pass `sin`/`cos` with 2 dims. In those models, they are the full sin/cos. Remove
602
+ # after all RoPE models have a llama-like cache utilization.
603
+ if cos.dim() == 2:
604
+ self._cos_cache = cos
605
+ self._sin_cache = sin
606
+ else:
607
+ if self._cos_cache is None:
608
+ self._cos_cache = cos[0, ...]
609
+ self._sin_cache = sin[0, ...]
610
+ elif self._cos_cache.shape[0] < self.window_length:
611
+ self._cos_cache = torch.cat([self._cos_cache, cos[0, ...]], dim=0)
612
+ self._sin_cache = torch.cat([self._sin_cache, sin[0, ...]], dim=0)
613
+
614
+ # [bsz, num_heads, seq_len, head_dim]
615
+ if len(self.key_cache) <= layer_idx:
616
+ # Empty cache
617
+ self.key_cache.append(key_states)
618
+ self.value_cache.append(value_states)
619
+
620
+ elif key_states.shape[-2] + self.get_seq_length(layer_idx) < self.window_length:
621
+ # Growing cache
622
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
623
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
624
+
625
+ else:
626
+ # Shifting cache
627
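+ # Keep only the most recent (window_length - num_sink_tokens - num_new_tokens) positions;
+ # the sink tokens are re-attached below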
+ keys_to_keep = self.key_cache[layer_idx][
628
+ :, :, -self.window_length + self.num_sink_tokens + key_states.shape[-2] :
629
+ ]
630
+
631
+ # On RoPE models, we need to recompute the Key rotation as the tokens are shifted
632
+ if using_rope:
633
+ rerotation_cos, rerotation_sin = self._get_rerotation_cos_sin(
634
+ key_states, self._cos_cache[: self.window_length], self._sin_cache[: self.window_length]
635
+ )
636
+ if partial_rotation_size is not None:
637
+ keys_to_keep, keys_pass = (
638
+ keys_to_keep[..., :partial_rotation_size],
639
+ keys_to_keep[..., partial_rotation_size:],
640
+ )
641
+ keys_to_keep = self._apply_key_rotary_pos_emb(keys_to_keep, rerotation_cos, rerotation_sin)
642
+ if partial_rotation_size is not None:
643
+ keys_to_keep = torch.cat((keys_to_keep, keys_pass), dim=-1)
644
+
645
+ # Concatenate sink tokens, shifted & rotated tokens (if needed), and new tokens
646
+ sink_keys = self.key_cache[layer_idx][:, :, : self.num_sink_tokens]
647
+ self.key_cache[layer_idx] = torch.cat([sink_keys, keys_to_keep, key_states], dim=-2)
648
+
649
+ sink_values = self.value_cache[layer_idx][:, :, : self.num_sink_tokens]
650
+ values_to_keep = self.value_cache[layer_idx][
651
+ :, :, -self.window_length + self.num_sink_tokens + value_states.shape[-2] :
652
+ ]
653
+ self.value_cache[layer_idx] = torch.cat([sink_values, values_to_keep, value_states], dim=-2)
654
+
655
+ return self.key_cache[layer_idx], self.value_cache[layer_idx]
656
+
657
+
658
+ class StaticCache(Cache):
659
+ """
660
+ Static Cache class to be used with `torch.compile(model)` and `torch.export()`.
661
+
662
+ Parameters:
663
+ config (`PretrainedConfig`):
664
+ The configuration file defining the shape-related attributes required to initialize the static cache.
665
+ max_batch_size (`int`):
666
+ The maximum batch size with which the model will be used.
667
+ max_cache_len (`int`):
668
+ The maximum sequence length with which the model will be used.
669
+ device (`torch.device`):
670
+ The device on which the cache should be initialized. Should be the same as the layer.
671
+ dtype (*optional*, defaults to `torch.float32`):
672
+ The default `dtype` to use when initializing the layer.
673
+
674
+ Example:
675
+
676
+ ```python
677
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, StaticCache
678
+
679
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
680
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
681
+
682
+ >>> inputs = tokenizer(text="My name is GPT2", return_tensors="pt")
683
+
684
+ >>> # Prepare a cache class and pass it to model's forward
685
+ >>> # Leave empty space for 10 new tokens, which can be used when calling forward iteratively 10 times to generate
686
+ >>> max_generated_length = inputs.input_ids.shape[1] + 10
687
+ >>> past_key_values = StaticCache(config=model.config, max_batch_size=1, max_cache_len=max_generated_length, device=model.device, dtype=model.dtype)
688
+ >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
689
+ >>> past_kv_length = outputs.past_key_values # access cache filled with key/values from generation
690
+ ```
691
+ """
692
+
693
+ def __init__(self, config: PretrainedConfig, max_batch_size: int, max_cache_len: int, device, dtype=None) -> None:
694
+ super().__init__()
695
+ self.max_batch_size = max_batch_size
696
+ self.max_cache_len = config.max_position_embeddings if max_cache_len is None else max_cache_len
697
+ # Some models define a custom `head_dim` != config.hidden_size // config.num_attention_heads
698
+ self.head_dim = (
699
+ config.head_dim if hasattr(config, "head_dim") else config.hidden_size // config.num_attention_heads
700
+ )
701
+
702
+ self.dtype = dtype if dtype is not None else torch.float32
703
+ self.num_key_value_heads = (
704
+ config.num_attention_heads if config.num_key_value_heads is None else config.num_key_value_heads
705
+ )
706
+
707
+ self.key_cache: List[torch.Tensor] = []
708
+ self.value_cache: List[torch.Tensor] = []
709
+ # Note: There will be significant perf decrease if switching to use 5D tensors instead.
710
+ cache_shape = (max_batch_size, self.num_key_value_heads, self.max_cache_len, self.head_dim)
711
+ for idx in range(config.num_hidden_layers):
712
+ new_layer_key_cache = torch.zeros(cache_shape, dtype=self.dtype, device=device)
713
+ new_layer_value_cache = torch.zeros(cache_shape, dtype=self.dtype, device=device)
714
+ # Notes:
715
+ # 1. `mark_static_address` is used to tag the cache as a fixed data pointer, preventing cuda graph
716
+ # breaks when updating the cache. It can't be used if the cache code is being compiled (but in that case
717
+ # it is not needed anyway)
718
+ # 2. `torch.export()` requires mutations to be registered as buffers.
719
+ if not is_torchdynamo_compiling():
720
+ self.register_buffer(f"key_cache_{idx}", torch.zeros(cache_shape, dtype=dtype, device=device))
721
+ self.register_buffer(f"value_cache_{idx}", torch.zeros(cache_shape, dtype=dtype, device=device))
722
+ new_layer_key_cache = getattr(self, f"key_cache_{idx}")
723
+ new_layer_value_cache = getattr(self, f"value_cache_{idx}")
724
+ torch._dynamo.mark_static_address(new_layer_key_cache)
725
+ torch._dynamo.mark_static_address(new_layer_value_cache)
726
+ self.key_cache.append(new_layer_key_cache)
727
+ self.value_cache.append(new_layer_value_cache)
728
+ self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen
729
+
730
+ def update(
731
+ self,
732
+ key_states: torch.Tensor,
733
+ value_states: torch.Tensor,
734
+ layer_idx: int,
735
+ cache_kwargs: Optional[Dict[str, Any]] = None,
736
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
737
+ """
738
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
739
+ It is VERY important to index using a tensor, otherwise you introduce a copy to the device.
740
+
741
+ Parameters:
742
+ key_states (`torch.Tensor`):
743
+ The new key states to cache.
744
+ value_states (`torch.Tensor`):
745
+ The new value states to cache.
746
+ layer_idx (`int`):
747
+ The index of the layer to cache the states for.
748
+ cache_kwargs (`Dict[str, Any]`, `optional`):
749
+ Additional arguments for the cache subclass. The `StaticCache` needs the `cache_position` input
750
+ to know where to write in the cache.
751
+
752
+ Return:
753
+ A tuple containing the updated key and value states.
754
+ """
755
+ # Update the number of seen tokens
756
+ if layer_idx == 0:
757
+ self._seen_tokens += key_states.shape[-2]
758
+
759
+ cache_position = cache_kwargs.get("cache_position")
760
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].to(device=key_states.device)
761
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].to(device=value_states.device)
762
+ k_out = self.key_cache[layer_idx]
763
+ v_out = self.value_cache[layer_idx]
764
+
765
+ if cache_position is None:
766
+ k_out.copy_(key_states)
767
+ v_out.copy_(value_states)
768
+ else:
769
+ # Note: here we use `tensor.index_copy_(dim, index, tensor)`, which is equivalent to
770
+ # `tensor[:, :, index] = tensor`, but the first one is compile-friendly and explicitly performs an
771
+ # in-place operation, avoiding copies and using less memory.
772
+ try:
773
+ k_out.index_copy_(2, cache_position, key_states)
774
+ v_out.index_copy_(2, cache_position, value_states)
775
+ except NotImplementedError:
776
+ # The operator 'aten::index_copy.out' is not currently implemented for the MPS device.
777
+ k_out[:, :, cache_position] = key_states
778
+ v_out[:, :, cache_position] = value_states
779
+
780
+ return k_out, v_out
781
+
782
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
783
+ """Returns the sequence length of the cached states that were seen by the model."""
784
+ # Occupied cache == any slot in the 3rd dim (sequence length) holds a non-zero value. To save on compute, let's
785
+ # limit the check to the first batch member and head dimension.
786
+ # TODO: deprecate this function in favor of `cache_position`
787
+ # return (self.key_cache[layer_idx][0, 0].any(dim=-1)).sum()
788
+ return self._seen_tokens
789
+
790
+ def get_max_length(self) -> Optional[int]:
791
+ """Returns the maximum sequence length of the cached states."""
792
+ return self.max_cache_len
793
+
794
+ def reset(self):
795
+ """Resets the cache values while preserving the objects"""
797
+ self._seen_tokens = 0
797
+ for layer_idx in range(len(self.key_cache)):
798
+ # In-place ops prevent breaking the static address
799
+ self.key_cache[layer_idx].zero_()
800
+ self.value_cache[layer_idx].zero_()
801
+
802
+
803
+ class SlidingWindowCache(StaticCache):
804
+ """
805
+ Sliding Window Cache class to be used with `torch.compile` for models like Mistral that support sliding window attention.
806
+ Every time we try to update the cache, we compute `indices` based on `cache_position >= self.config.sliding_window - 1`;
807
+ if this is true (meaning the cache cannot hold all the old key/value states and the new states together because of the sliding-window constraint),
808
+ we perform a cyclic shift based on `indices` to replace the oldest states with the new key/value states passed in.
809
+
810
+ The `to_shift` is only true once we are above sliding_window. Thus with `sliding_window==64`:
811
+
812
+ indices = (slicing + to_shift[-1].int()-1) % self.config.sliding_window
813
+ tensor([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
814
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
815
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
816
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 0])
817
+
818
+ We overwrite the cache using these, then we always write at cache_position (clamped to `sliding_window`)
819
+
820
+ Parameters:
821
+ config (`PretrainedConfig`):
822
+ The configuration file defining the shape-related attributes required to initialize the static cache.
823
+ max_batch_size (`int`):
824
+ The maximum batch size with which the model will be used.
825
+ max_cache_len (`int`):
826
+ The maximum sequence length with which the model will be used.
827
+ device (`torch.device`):
828
+ The device on which the cache should be initialized. Should be the same as the layer.
829
+ dtype (*optional*, defaults to `torch.float32`):
830
+ The default `dtype` to use when initializing the layer.
831
+
832
+ Example:
833
+
834
+ ```python
835
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, SlidingWindowCache
836
+
837
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
838
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
839
+
840
+ >>> inputs = tokenizer(text="My name is GPT2", return_tensors="pt")
841
+
842
+ >>> # Prepare a cache class and pass it to model's forward
843
+ >>> # Leave empty space for 10 new tokens, which can be used when calling forward iteratively 10 times to generate
844
+ >>> max_generated_length = inputs.input_ids.shape[1] + 10
845
+ >>> past_key_values = SlidingWindowCache(config=model.config, max_batch_size=1, max_cache_len=max_generated_length, device=model.device, dtype=model.dtype)
846
+ >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
847
+ >>> past_kv_length = outputs.past_key_values # access cache filled with key/values from generation
848
+ ```
849
+ """
850
+
851
+ def __init__(self, config: PretrainedConfig, max_batch_size: int, max_cache_len: int, device, dtype=None) -> None:
852
+ super().__init__(config, max_batch_size, max_cache_len, device, dtype)
853
+ if not hasattr(config, "sliding_window") or config.sliding_window is None:
854
+ raise ValueError(
855
+ "Setting `cache_implementation` to 'sliding_window' requires the model config supporting "
856
+ "sliding window attention, please check if there is a `sliding_window` field in the model "
857
+ "config and it's not set to None."
858
+ )
859
+ max_cache_len = min(config.sliding_window, max_cache_len)
860
+ super().__init__(
861
+ config=config, max_batch_size=max_batch_size, max_cache_len=max_cache_len, device=device, dtype=dtype
862
+ )
863
+
864
+ def update(
865
+ self,
866
+ key_states: torch.Tensor,
867
+ value_states: torch.Tensor,
868
+ layer_idx: int,
869
+ cache_kwargs: Optional[Dict[str, Any]] = None,
870
+ ) -> Tuple[torch.Tensor]:
871
+ cache_position = cache_kwargs.get("cache_position")
872
+ k_out = self.key_cache[layer_idx]
873
+ v_out = self.value_cache[layer_idx]
874
+
875
+ # assume this only happens in prefill phase when prompt length > sliding_window_size (= max_cache_len)
876
+ if cache_position.shape[0] > self.max_cache_len:
877
+ k_out = key_states[:, :, -self.max_cache_len :, :]
878
+ v_out = value_states[:, :, -self.max_cache_len :, :]
879
+ # Assumption: caches are all zeros at this point, `+=` is equivalent to `=` but compile-friendly
880
+ self.key_cache[layer_idx] += k_out
881
+ self.value_cache[layer_idx] += v_out
882
+ # we should return the whole states instead of k_out, v_out to take the whole prompt
883
+ # into consideration when building kv cache instead of just throwing away tokens outside of the window
884
+ return key_states, value_states
885
+
886
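+ # `slicing` is [1, 2, ..., max_cache_len]; combined with `to_shift` it yields the cyclic re-ordering
+ # indices described in the class docstring (a one-step left rotation once the window is full)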
+ slicing = torch.ones(self.max_cache_len, dtype=torch.long, device=value_states.device).cumsum(0)
887
+ cache_position = cache_position.clamp(0, self.max_cache_len - 1)
888
+ to_shift = cache_position >= self.max_cache_len - 1
889
+ indices = (slicing + to_shift[-1].int() - 1) % self.max_cache_len
890
+
891
+ k_out = k_out[:, :, indices]
892
+ v_out = v_out[:, :, indices]
893
+
894
+ try:
895
+ cache_position = cache_position.to(device=k_out.device)
896
+ k_out.index_copy_(2, cache_position, key_states)
897
+ v_out.index_copy_(2, cache_position, value_states)
898
+ except NotImplementedError:
899
+ # The operator 'aten::index_copy.out' is not currently implemented for the MPS device.
900
+ k_out[:, :, cache_position] = key_states
901
+ v_out[:, :, cache_position] = value_states
902
+
903
+ # `_.zero()` followed by `+=` is equivalent to `=`, but compile-friendly (without graph breaks due to assignment)
904
+ self.key_cache[layer_idx].zero_()
905
+ self.value_cache[layer_idx].zero_()
906
+
907
+ self.key_cache[layer_idx] += k_out
908
+ self.value_cache[layer_idx] += v_out
909
+
910
+ return k_out, v_out
911
+
912
+ def get_max_length(self) -> Optional[int]:
913
+ # in theory there is no limit because the sliding window size is fixed no matter how long the sentence is
914
+ return None
915
+
916
+ def reset(self):
917
+ for layer_idx in range(len(self.key_cache)):
918
+ # In-place ops prevent breaking the static address
919
+ self.key_cache[layer_idx].zero_()
920
+ self.value_cache[layer_idx].zero_()
921
+
922
+
923
+ class EncoderDecoderCache(Cache):
924
+ """
925
+ Base, abstract class for all encoder-decoder caches. Can be used to hold combinations of self-attention and
926
+ cross-attention caches.
927
+
928
+ Example:
929
+
930
+ ```python
931
+ >>> from transformers import AutoProcessor, AutoModelForCausalLM, DynamicCache, EncoderDecoderCache
932
+
933
+ >>> model = AutoModelForCausalLM.from_pretrained("openai/whisper-small")
934
+ >>> processor = AutoProcessor.from_pretrained("openai/whisper-small")
935
+
936
+ >>> inputs = processor(audio=YOUR-AUDIO, return_tensors="pt")
937
+
938
+ >>> # Prepare cache classes for encoder and decoder and pass it to model's forward
939
+ >>> self_attention_cache = DynamicCache()
940
+ >>> cross_attention_cache = DynamicCache()
941
+ >>> past_key_values = EncoderDecoderCache(self_attention_cache, cross_attention_cache)
942
+ >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
943
+ >>> past_kv_length = outputs.past_key_values # access cache filled with key/values from generation
944
+ ```
945
+
946
+ """
947
+
948
+ def __init__(self, self_attention_cache: Cache, cross_attention_cache: Cache):
949
+ super().__init__()
950
+ self.self_attention_cache = self_attention_cache
951
+ self.cross_attention_cache = cross_attention_cache
952
+
953
+ self.is_updated = {}
954
+ for layer_idx in range(len(cross_attention_cache.key_cache)):
955
+ self.is_updated[layer_idx] = bool(cross_attention_cache.get_seq_length(layer_idx) > 0)
956
+
957
+ def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]:
958
+ """
959
+ Support for backwards-compatible `past_key_value` indexing, e.g. `past_key_value[0][0].shape[2]` to get the
960
+ sequence length.
961
+ """
962
+ if layer_idx < len(self):
963
+ return (
964
+ self.self_attention_cache.key_cache[layer_idx],
965
+ self.self_attention_cache.value_cache[layer_idx],
966
+ self.cross_attention_cache.key_cache[layer_idx],
967
+ self.cross_attention_cache.value_cache[layer_idx],
968
+ )
969
+ else:
970
+ raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}")
971
+
972
+ def __len__(self):
973
+ """
974
+ Support for backwards-compatible `past_key_value` length, e.g. `len(past_key_value)`. This value corresponds
975
+ to the number of layers in the model.
976
+ """
977
+ return len(self.self_attention_cache)
978
+
979
+ def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
980
+ """Converts the `EncoderDecoderCache` instance into its equivalent in the legacy cache format."""
981
+ legacy_cache = ()
982
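+ # Legacy format: one tuple per layer, (self_attn_key, self_attn_value[, cross_attn_key, cross_attn_value])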
+ if len(self.cross_attention_cache) > 0:
983
+ for self_attn, cross_attn in zip(
984
+ self.self_attention_cache.to_legacy_cache(), self.cross_attention_cache.to_legacy_cache()
985
+ ):
986
+ legacy_cache += (self_attn + cross_attn,)
987
+ else:
988
+ legacy_cache = self.self_attention_cache.to_legacy_cache()
989
+ return legacy_cache
990
+
991
+ @classmethod
992
+ def from_legacy_cache(
993
+ cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
994
+ ) -> "EncoderDecoderCache":
995
+ """Converts a cache in the legacy cache format into an equivalent `EncoderDecoderCache`."""
996
+ cache = cls(self_attention_cache=DynamicCache(), cross_attention_cache=DynamicCache())
997
+ if past_key_values is not None:
998
+ for layer_idx in range(len(past_key_values)):
999
+ key_states, value_states = past_key_values[layer_idx][:2]
1000
+ cache.self_attention_cache.update(key_states, value_states, layer_idx)
1001
+ if len(past_key_values[layer_idx]) > 2:
1002
+ key_states, value_states = past_key_values[layer_idx][2:]
1003
+ cache.cross_attention_cache.update(key_states, value_states, layer_idx)
1004
+ cache.is_updated[layer_idx] = True
1005
+ return cache
1006
+
1007
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
1008
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
1009
+ if len(self.self_attention_cache.key_cache) <= layer_idx:
1010
+ return 0
1011
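+ # Count positions whose key vector is non-zero, checking only the first batch member and head to save compute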
+ return (self.self_attention_cache.key_cache[layer_idx][0, 0].any(dim=-1)).sum()
1012
+
1013
+ def reset(self):
1014
+ if hasattr(self.self_attention_cache, "reset"):
1015
+ self.self_attention_cache.reset()
1016
+ if hasattr(self.cross_attention_cache, "reset"):
1017
+ self.cross_attention_cache.reset()
1018
+ elif not hasattr(self.self_attention_cache, "reset") and not hasattr(self.cross_attention_cache, "reset"):
1019
+ raise ValueError(
1020
+ "Neither self nor cross-attention cache have valid `.reset()` methods. `.reset()` should "
1021
+ "only be called on compatible cache classes, such as `StaticCache` or `SlidingWindowCache`. "
1022
+ f"Got {self.self_attention_cache.__str__()} for the self attention cache and "
1023
+ f"{self.cross_attention_cache.__str__()} for the cross attention cache."
1024
+ )
1025
+ for layer_idx in self.is_updated:
1026
+ self.is_updated[layer_idx] = False
1027
+
1028
+ def reorder_cache(self, beam_idx: torch.LongTensor):
1029
+ """Reorders the cache for beam search, given the selected beam indices."""
1030
+ self.self_attention_cache.reorder_cache(beam_idx)
1031
+ self.cross_attention_cache.reorder_cache(beam_idx)
1032
+
1033
+ def check_dynamic_cache(self, method: str):
1034
+ if not (
1035
+ isinstance(self.self_attention_cache, DynamicCache)
1036
+ and isinstance(self.cross_attention_cache, DynamicCache)
1037
+ ):
1038
+ raise ValueError(
1039
+ f"`{method}` is only defined for dynamic cache, got {self.self_attention_cache.__str__()} for the self "
1040
+ f"attention cache and {self.cross_attention_cache.__str__()} for the cross attention cache."
1041
+ )
1042
+
1043
+ # TODO(gante, sanchit-gandhi): move following functionality into `.generate`
1044
+ def crop(self, maximum_length: int):
1045
+ """Crop the past key values up to a new `maximum_length` in terms of tokens. `maximum_length` can also be
1046
+ negative to remove `maximum_length` tokens. This is used in assisted decoding and contrastive search."""
1047
+ self.check_dynamic_cache(self.crop.__name__)
1048
+ self.self_attention_cache.crop(maximum_length)
1049
+
1050
+ def batch_split(self, full_batch_size: int, split_size: int) -> "List[EncoderDecoderCache]":
1051
+ """Split the current instance into a list of `DynamicCache` by the batch size. This will be used by
1052
+ `_split_model_inputs()` in `generation.utils`"""
1053
+ self.check_dynamic_cache(self.batch_split.__name__)
1054
+ self_attention_cache = self.self_attention_cache.batch_split(full_batch_size, split_size)
1055
+ cross_attention_cache = self.cross_attention_cache.batch_split(full_batch_size, split_size)
1056
+
1057
+ out = []
1058
+ for self_attn, cross_attn in zip(self_attention_cache, cross_attention_cache):
1059
+ out.append(EncoderDecoderCache(self_attn, cross_attn))
1060
+ return out
1061
+
1062
+ @classmethod
1063
+ def from_batch_splits(cls, splits: List["EncoderDecoderCache"]) -> "EncoderDecoderCache":
1064
+ """This is the opposite of the above `batch_split()` method. This will be used by `stack_model_outputs` in
1065
+ `generation.utils`"""
1066
+ self_attention_cache = DynamicCache()
1067
+ cross_attention_cache = DynamicCache()
1068
+ for idx in range(len(splits[0])):
1069
+ layer_keys = torch.cat([current.self_attention_cache.key_cache[idx] for current in splits], dim=0)
1070
+ layer_values = torch.cat([current.self_attention_cache.value_cache[idx] for current in splits], dim=0)
1071
+ self_attention_cache.update(layer_keys, layer_values, idx)
1072
+
1073
+ layer_keys = torch.cat([current.cross_attention_cache.key_cache[idx] for current in splits], dim=0)
1074
+ layer_values = torch.cat([current.cross_attention_cache.value_cache[idx] for current in splits], dim=0)
1075
+ cross_attention_cache.update(layer_keys, layer_values, idx)
1076
+ return cls(self_attention_cache, cross_attention_cache)
1077
+
1078
+ def batch_repeat_interleave(self, repeats: int):
1079
+ """Repeat the cache `repeats` times in the batch dimension. Used in contrastive search."""
1080
+ self.check_dynamic_cache(self.batch_repeat_interleave.__name__)
1081
+ self.self_attention_cache.batch_repeat_interleave(repeats)
1082
+ self.cross_attention_cache.batch_repeat_interleave(repeats)
1083
+
1084
+ def batch_select_indices(self, indices: torch.Tensor):
1085
+ """Only keep the `indices` in the batch dimension of the cache. Used in contrastive search."""
1086
+ self.check_dynamic_cache(self.batch_select_indices.__name__)
1087
+ self.self_attention_cache.batch_select_indices(indices)
1088
+ self.cross_attention_cache.batch_select_indices(indices)
1089
+
1090
+
1091
+ class HybridCache(Cache):
1092
+ """
1093
+ Hybrid Cache class to be used with `torch.compile` for Gemma2 models that alternate between a local sliding window attention
1094
+ and global attention in every other layer. Under the hood, Hybrid Cache leverages ["SlidingWindowCache"] for sliding window attention
1095
+ and ["StaticCache"] for global attention. For more information, see the documentation of each subcomponeent cache class.
1096
+
1097
+ Parameters:
1098
+ config (`PretrainedConfig`):
1099
+ The configuration file defining the shape-related attributes required to initialize the static cache.
1100
+ max_batch_size (`int`):
1101
+ The maximum batch size with which the model will be used.
1102
+ max_cache_len (`int`):
1103
+ The maximum sequence length with which the model will be used.
1104
+ device (`torch.device`, *optional*, defaults to `"cpu"`):
1105
+ The device on which the cache should be initialized. Should be the same as the layer.
1106
+ dtype (*optional*, defaults to `torch.float32`):
1107
+ The default `dtype` to use when initializing the layer.
1108
+
1109
+ Example:
1110
+
1111
+ ```python
1112
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, HybridCache
1113
+
1114
+ >>> model = AutoModelForCausalLM.from_pretrained("google/gemma-2-9b")
1115
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")
1116
+
1117
+ >>> inputs = tokenizer(text="My name is Gemma", return_tensors="pt")
1118
+
1119
+ >>> # Prepare a cache class and pass it to model's forward
1120
+ >>> # Leave empty space for 10 new tokens, which can be used when calling forward iteratively 10 times to generate
1121
+ >>> max_generated_length = inputs.input_ids.shape[1] + 10
1122
+ >>> past_key_values = HybridCache(config=model.config, max_batch_size=1, max_cache_len=max_generated_length, device=model.device, dtype=model.dtype)
1123
+ >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
1124
+ >>> past_kv_length = outputs.past_key_values # access cache filled with key/values from generation
1125
+ ```
1126
+ """
1127
+
1128
+ def __init__(self, config: PretrainedConfig, max_batch_size, max_cache_len, device="cpu", dtype=None) -> None:
1129
+ super().__init__()
1130
+ if not hasattr(config, "sliding_window") or config.sliding_window is None:
1131
+ raise ValueError(
1132
+ "Setting `cache_implementation` to 'sliding_window' requires the model config supporting "
1133
+ "sliding window attention, please check if there is a `sliding_window` field in the model "
1134
+ "config and it's not set to None."
1135
+ )
1136
+ self.max_cache_len = max_cache_len
1137
+ self.max_batch_size = max_batch_size
1138
+ # Some models define a custom `head_dim` != config.hidden_size // config.num_attention_heads
1139
+ self.head_dim = (
1140
+ config.head_dim if hasattr(config, "head_dim") else config.hidden_size // config.num_attention_heads
1141
+ )
1142
+
1143
+ self.dtype = dtype if dtype is not None else torch.float32
1144
+ self.num_key_value_heads = (
1145
+ config.num_attention_heads if config.num_key_value_heads is None else config.num_key_value_heads
1146
+ )
1147
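+ # Even-indexed layers are marked as sliding-window layers; odd-indexed layers use global attention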
+ self.is_sliding = torch.tensor(
1148
+ [not bool(i % 2) for i in range(config.num_hidden_layers)], dtype=torch.bool, device=device
1149
+ )
1150
+ self.key_cache: List[torch.Tensor] = []
1151
+ self.value_cache: List[torch.Tensor] = []
1152
+ global_cache_shape = (max_batch_size, self.num_key_value_heads, max_cache_len, self.head_dim)
1153
+ sliding_cache_shape = (
1154
+ max_batch_size,
1155
+ self.num_key_value_heads,
1156
+ min(config.sliding_window, max_cache_len),
1157
+ self.head_dim,
1158
+ )
1159
+ for i in range(config.num_hidden_layers):
1160
+ # Note: `mark_static_address` is used to tag the cache as a fixed data pointer, preventing cuda graph
1161
+ # breaks when updating the cache.
1162
+ cache_shape = global_cache_shape if not self.is_sliding[i] else sliding_cache_shape
1163
+ new_layer_key_cache = torch.zeros(cache_shape, dtype=self.dtype, device=device)
1164
+ new_layer_value_cache = torch.zeros(cache_shape, dtype=self.dtype, device=device)
1165
+ torch._dynamo.mark_static_address(new_layer_key_cache)
1166
+ torch._dynamo.mark_static_address(new_layer_value_cache)
1167
+ self.key_cache.append(new_layer_key_cache)
1168
+ self.value_cache.append(new_layer_value_cache)
1169
+
1170
+ def _sliding_update(self, cache_position, layer_idx, key_states, value_states, k_out, v_out, max_cache_len):
1171
+ if cache_position.shape[0] > max_cache_len:
1172
+ k_out = key_states[:, :, -max_cache_len:, :]
1173
+ v_out = value_states[:, :, -max_cache_len:, :]
1174
+ # Assumption: caches are all zeros at this point, `+=` is equivalent to `=` but compile-friendly
1175
+ self.key_cache[layer_idx] += k_out
1176
+ self.value_cache[layer_idx] += v_out
1177
+ # we should return the whole states instead of k_out, v_out to take the whole prompt
1178
+ # into consideration when building kv cache instead of just throwing away tokens outside of the window
1179
+ return key_states, value_states
1180
+
1181
+ slicing = torch.ones(max_cache_len, dtype=torch.long, device=value_states.device).cumsum(0)
1182
+ cache_position = cache_position.clamp(0, max_cache_len - 1)
1183
+ to_shift = cache_position >= max_cache_len - 1
1184
+ indices = (slicing + to_shift[-1].int() - 1) % max_cache_len
1185
+ k_out = k_out[:, :, indices]
1186
+ v_out = v_out[:, :, indices]
1187
+
1188
+ k_out[:, :, cache_position] = key_states
1189
+ v_out[:, :, cache_position] = value_states
1190
+ # `_.zero()` followed by `+=` is equivalent to `=`, but compile-friendly (without graph breaks due to assignment)
1191
+ self.key_cache[layer_idx].zero_()
1192
+ self.value_cache[layer_idx].zero_()
1193
+
1194
+ self.key_cache[layer_idx] += k_out
1195
+ self.value_cache[layer_idx] += v_out
1196
+ return k_out, v_out
1197
+
1198
+ def _static_update(self, cache_position, layer_idx, key_states, value_states, k_out, v_out, max_cache_len):
1199
+ k_out[:, :, cache_position] = key_states
1200
+ v_out[:, :, cache_position] = value_states
1201
+
1202
+ self.key_cache[layer_idx] = k_out
1203
+ self.value_cache[layer_idx] = v_out
1204
+ return k_out, v_out
1205
+
1206
+ def update(
1207
+ self,
1208
+ key_states: torch.Tensor,
1209
+ value_states: torch.Tensor,
1210
+ layer_idx: int,
1211
+ cache_kwargs: Optional[Dict[str, Any]] = None,
1212
+ ) -> Tuple[torch.Tensor]:
1213
+ cache_position = cache_kwargs.get("cache_position")
1214
+ sliding_window = cache_kwargs.get("sliding_window")
1215
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].to(device=key_states.device)
1216
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].to(device=value_states.device)
1217
+ k_out = self.key_cache[layer_idx]
1218
+ v_out = self.value_cache[layer_idx]
1219
+ if sliding_window:
1220
+ update_fn = self._sliding_update
1221
+ else:
1222
+ update_fn = self._static_update
1223
+
1224
+ return update_fn(
1225
+ cache_position,
1226
+ layer_idx,
1227
+ key_states,
1228
+ value_states,
1229
+ k_out,
1230
+ v_out,
1231
+ k_out.shape[2],
1232
+ )
1233
+
1234
+ def get_max_length(self) -> Optional[int]:
1235
+ # the sliding-window layers drop old tokens and have no hard limit, but the global-attention
1236
+ # layers are capped at `max_cache_len`
1237
+ return self.max_cache_len
1238
+
1239
+ def get_seq_length(self, layer_idx: Optional[int] = 0):
1240
+ return None
1241
+
1242
+ def reset(self):
1243
+ """Resets the cache values while preserving the objects"""
1244
+ for layer_idx in range(len(self.key_cache)):
1245
+ # In-place ops prevent breaking the static address
1246
+ self.key_cache[layer_idx].zero_()
1247
+ self.value_cache[layer_idx].zero_()
1248
+
1249
+
1250
+ class MambaCache:
1251
+ """
1252
+ Cache for the Mamba model, which has no attention mechanism and no key/value states.
1253
+
1254
+ Arguments:
1255
+ config (`PretrainedConfig`):
1256
+ The configuration file defining the shape-related attributes required to initialize the static cache.
1257
+ max_batch_size (`int`):
1258
+ The maximum batch size with which the model will be used.
1259
+ dtype (*optional*, defaults to `torch.float16`):
1260
+ The default `dtype` to use when initializing the layer.
1261
+ device (`torch.device`, *optional*):
1262
+ The device on which the cache should be initialized. Should be the same as the layer.
1263
+
1264
+ Attributes:
1265
+ dtype: (`torch.dtype`):
1266
+ The default `dtype` used to initializing the cache.
1267
+ intermediate_size: (`int`):
1268
+ Model's intermediate_size taken from config.
1269
+ ssm_state_size: (`int`):
1270
+ Model's state_size taken from config.
1271
+ conv_kernel_size: (`int`):
1272
+ Model's convolution kernel size taken from config
1273
+ conv_states: (`torch.Tensor`):
1274
+ A tensor of shape `[layer_idx, batch_size, intermediate_size, conv_kernel_size]` that holds convolutional states.
1275
+ ssm_states: (`torch.Tensor`):
1276
+ A tensor of shape `[layer_idx, batch_size, intermediate_size, ssm_state_size]` that holds ssm states
1277
+
1278
+ Example:
1279
+
1280
+ ```python
1281
+ >>> from transformers import AutoTokenizer, MambaForCausalLM, MambaCache
1282
+
1283
+ >>> model = MambaForCausalLM.from_pretrained("state-spaces/mamba-130m-hf")
1284
+ >>> tokenizer = AutoTokenizer.from_pretrained("state-spaces/mamba-130m-hf")
1285
+
1286
+ >>> inputs = tokenizer(text="My name is Mamba", return_tensors="pt")
1287
+
1288
+ >>> # Prepare a cache class and pass it to model's forward
1289
+ >>> past_key_values = MambaCache(config=model.config, max_batch_size=1, device=model.device, dtype=model.dtype)
1290
+ >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
1291
+ >>> past_kv = outputs.past_key_values
1292
+ ```
1293
+ """
1294
+
1295
+ def __init__(
1296
+ self,
1297
+ config: PretrainedConfig,
1298
+ max_batch_size: int,
1299
+ dtype: torch.dtype = torch.float16,
1300
+ device: Optional[str] = None,
1301
+ **kwargs,
1302
+ ):
1303
+ self.dtype = dtype
1304
+ self.max_batch_size = max_batch_size
1305
+ self.intermediate_size = config.intermediate_size
1306
+ self.ssm_state_size = config.state_size
1307
+ self.conv_kernel_size = config.conv_kernel
1308
+
1309
+ self.conv_states: torch.Tensor = torch.zeros(
1310
+ config.num_hidden_layers,
1311
+ self.max_batch_size,
1312
+ self.intermediate_size,
1313
+ self.conv_kernel_size,
1314
+ device=device,
1315
+ dtype=dtype,
1316
+ )
1317
+ self.ssm_states: torch.Tensor = torch.zeros(
1318
+ config.num_hidden_layers,
1319
+ self.max_batch_size,
1320
+ self.intermediate_size,
1321
+ self.ssm_state_size,
1322
+ device=device,
1323
+ dtype=dtype,
1324
+ )
1325
+
1326
+ torch._dynamo.mark_static_address(self.conv_states)
1327
+ torch._dynamo.mark_static_address(self.ssm_states)
1328
+
1329
+ def update_conv_state(
1330
+ self, layer_idx: int, new_conv_state: torch.Tensor, cache_position: torch.LongTensor
1331
+ ) -> torch.Tensor:
1332
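+ # Roll the convolution window one step to the left, then write the new state at the (clamped) cache position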
+ conv_state = self.conv_states[layer_idx]
1333
+ cache_position = cache_position.clamp(0, self.conv_kernel_size - 1)
1334
+
1335
+ conv_state = conv_state.roll(shifts=-1, dims=-1)
1336
+ conv_state[:, :, cache_position] = new_conv_state.to(conv_state.device)
1337
+ self.conv_states[layer_idx].zero_()
1338
+ self.conv_states[layer_idx] += conv_state
1339
+ return self.conv_states[layer_idx]
1340
+
1341
+ def update_ssm_state(self, layer_idx: int, new_ssm_state: torch.Tensor):
1342
+ self.ssm_states[layer_idx] = new_ssm_state.to(self.ssm_states.device)
1343
+ return self.ssm_states[layer_idx]
1344
+
1345
+ def reset(self):
1346
+ self.conv_states.zero_()
1347
+ self.ssm_states.zero_()
transformers_4_44_2__configuration_llama.py ADDED
@@ -0,0 +1,203 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """LLaMA model configuration"""
21
+
22
+ from transformers.configuration_utils import PretrainedConfig
23
+ from .transformers_4_44_2__modeling_rope_utils import rope_config_validation
24
+
25
+
26
+ class LlamaConfig(PretrainedConfig):
27
+ r"""
28
+ This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA
29
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
30
+ defaults will yield a similar configuration to that of the LLaMA-7B.
31
+
32
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
33
+ documentation from [`PretrainedConfig`] for more information.
34
+
35
+
36
+ Args:
37
+ vocab_size (`int`, *optional*, defaults to 32000):
38
+ Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
39
+ `inputs_ids` passed when calling [`LlamaModel`]
40
+ hidden_size (`int`, *optional*, defaults to 4096):
41
+ Dimension of the hidden representations.
42
+ intermediate_size (`int`, *optional*, defaults to 11008):
43
+ Dimension of the MLP representations.
44
+ num_hidden_layers (`int`, *optional*, defaults to 32):
45
+ Number of hidden layers in the Transformer decoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 32):
47
+ Number of attention heads for each attention layer in the Transformer decoder.
48
+ num_key_value_heads (`int`, *optional*):
49
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
50
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
51
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
52
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
53
+ by meanpooling all the original heads within that group. For more details checkout [this
54
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
55
+ `num_attention_heads`.
56
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
57
+ The non-linear activation function (function or string) in the decoder.
58
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
59
+ The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
60
+ Llama 2 up to 4096, CodeLlama up to 16384.
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
64
+ The epsilon used by the rms normalization layers.
65
+ use_cache (`bool`, *optional*, defaults to `True`):
66
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
67
+ relevant if `config.is_decoder=True`.
68
+ pad_token_id (`int`, *optional*):
69
+ Padding token id.
70
+ bos_token_id (`int`, *optional*, defaults to 1):
71
+ Beginning of stream token id.
72
+ eos_token_id (`int`, *optional*, defaults to 2):
73
+ End of stream token id.
74
+ pretraining_tp (`int`, *optional*, defaults to 1):
75
+ Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
76
+ document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
77
+ understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
78
+ results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
79
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
80
+ Whether to tie weight embeddings
81
+ rope_theta (`float`, *optional*, defaults to 10000.0):
82
+ The base period of the RoPE embeddings.
83
+ rope_scaling (`Dict`, *optional*):
84
+ Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
85
+ and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
86
+ accordingly.
87
+ Expected contents:
88
+ `rope_type` (`str`):
89
+ The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
90
+ 'llama3'], with 'default' being the original RoPE implementation.
91
+ `factor` (`float`, *optional*):
92
+ Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
93
+ most scaling types, a `factor` of x will enable the model to handle sequences of length x *
94
+ original maximum pre-trained length.
95
+ `original_max_position_embeddings` (`int`, *optional*):
96
+ Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
97
+ pretraining.
98
+ `attention_factor` (`float`, *optional*):
99
+ Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
100
+ computation. If unspecified, it defaults to value recommended by the implementation, using the
101
+ `factor` field to infer the suggested value.
102
+ `beta_fast` (`float`, *optional*):
103
+ Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
104
+ ramp function. If unspecified, it defaults to 32.
105
+ `beta_slow` (`float`, *optional*):
106
+ Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
107
+ ramp function. If unspecified, it defaults to 1.
108
+ `short_factor` (`List[float]`, *optional*):
109
+ Only used with 'longrope'. The scaling factor to be applied to short contexts (<
110
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
111
+ size divided by the number of attention heads divided by 2
112
+ `long_factor` (`List[float]`, *optional*):
113
+ Only used with 'longrope'. The scaling factor to be applied to long contexts (>
114
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
115
+ size divided by the number of attention heads divided by 2
116
+ `low_freq_factor` (`float`, *optional*):
117
+ Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
118
+ `high_freq_factor` (`float`, *optional*):
119
+ Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
120
+ attention_bias (`bool`, *optional*, defaults to `False`):
121
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
122
+ attention_dropout (`float`, *optional*, defaults to 0.0):
123
+ The dropout ratio for the attention probabilities.
124
+ mlp_bias (`bool`, *optional*, defaults to `False`):
125
+ Whether to use a bias in the up_proj, down_proj and gate_proj layers of the MLP.
126
+
127
+ ```python
128
+ >>> from transformers import LlamaModel, LlamaConfig
129
+
130
+ >>> # Initializing a LLaMA llama-7b style configuration
131
+ >>> configuration = LlamaConfig()
132
+
133
+ >>> # Initializing a model from the llama-7b style configuration
134
+ >>> model = LlamaModel(configuration)
135
+
136
+ >>> # Accessing the model configuration
137
+ >>> configuration = model.config
138
+ ```"""
139
+
140
+ model_type = "llama"
141
+ keys_to_ignore_at_inference = ["past_key_values"]
142
+
143
+ def __init__(
144
+ self,
145
+ vocab_size=32000,
146
+ hidden_size=4096,
147
+ intermediate_size=11008,
148
+ num_hidden_layers=32,
149
+ num_attention_heads=32,
150
+ num_key_value_heads=None,
151
+ hidden_act="silu",
152
+ max_position_embeddings=2048,
153
+ initializer_range=0.02,
154
+ rms_norm_eps=1e-6,
155
+ use_cache=True,
156
+ pad_token_id=None,
157
+ bos_token_id=1,
158
+ eos_token_id=2,
159
+ pretraining_tp=1,
160
+ tie_word_embeddings=False,
161
+ rope_theta=10000.0,
162
+ rope_scaling=None,
163
+ attention_bias=False,
164
+ attention_dropout=0.0,
165
+ mlp_bias=False,
166
+ **kwargs,
167
+ ):
168
+ self.vocab_size = vocab_size
169
+ self.max_position_embeddings = max_position_embeddings
170
+ self.hidden_size = hidden_size
171
+ self.intermediate_size = intermediate_size
172
+ self.num_hidden_layers = num_hidden_layers
173
+ self.num_attention_heads = num_attention_heads
174
+
175
+ # for backward compatibility
176
+ if num_key_value_heads is None:
177
+ num_key_value_heads = num_attention_heads
178
+
179
+ self.num_key_value_heads = num_key_value_heads
180
+ self.hidden_act = hidden_act
181
+ self.initializer_range = initializer_range
182
+ self.rms_norm_eps = rms_norm_eps
183
+ self.pretraining_tp = pretraining_tp
184
+ self.use_cache = use_cache
185
+ self.rope_theta = rope_theta
186
+ self.rope_scaling = rope_scaling
187
+ self.attention_bias = attention_bias
188
+ self.attention_dropout = attention_dropout
189
+ self.mlp_bias = mlp_bias
190
+
191
+ # Validate the correctness of rotary position embeddings parameters
192
+ # BC: if there is a 'type' field, move it to 'rope_type'.
193
+ if self.rope_scaling is not None and "type" in self.rope_scaling:
194
+ self.rope_scaling["rope_type"] = self.rope_scaling["type"]
195
+ rope_config_validation(self)
196
+
197
+ super().__init__(
198
+ pad_token_id=pad_token_id,
199
+ bos_token_id=bos_token_id,
200
+ eos_token_id=eos_token_id,
201
+ tie_word_embeddings=tie_word_embeddings,
202
+ **kwargs,
203
+ )
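
As a quick, hedged illustration of the `rope_scaling` dictionary documented above, the stock `LlamaConfig` from the usage example can be instantiated with a 'llama3'-style scaling dict. The numeric values below (factor, context lengths, theta) are assumptions for illustration only, not settings taken from this checkpoint.

```python
# Illustrative sketch only: building a LlamaConfig with a 'llama3'-style rope_scaling dict
# as documented above. All numeric values here are assumptions, not this model's settings.
from transformers import LlamaConfig

config = LlamaConfig(
    max_position_embeddings=131072,     # assumed extended context length
    rope_theta=500000.0,                # assumed RoPE base period
    rope_scaling={
        "rope_type": "llama3",
        "factor": 8.0,                             # overall scaling factor
        "original_max_position_embeddings": 8192,  # assumed pretraining context length
        "low_freq_factor": 1.0,
        "high_freq_factor": 4.0,
    },
)
print(config.rope_scaling["rope_type"])  # 'llama3'
```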
transformers_4_44_2__modeling_attn_mask_utils.py ADDED
@@ -0,0 +1,482 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import List, Optional, Tuple, Union
16
+
17
+ import torch
18
+
19
+
20
+ @dataclass
21
+ class AttentionMaskConverter:
22
+ """
23
+ A utility attention mask class that allows one to:
24
+ - Create a causal 4d mask
25
+ - Create a causal 4d mask with sliding window
26
+ - Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length,
27
+ key_value_length) that can be multiplied with attention scores
28
+
29
+ Examples:
30
+
31
+ ```python
32
+ >>> import torch
33
+ >>> from transformers.modeling_attn_mask_utils import AttentionMaskConverter
34
+
35
+ >>> converter = AttentionMaskConverter(True)
36
+ >>> converter.to_4d(torch.tensor([[0, 0, 0, 1, 1]]), 5, key_value_length=5, dtype=torch.float32)
37
+ tensor([[[[-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
38
+ [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
39
+ [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
40
+ [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, -3.4028e+38],
41
+ [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, 0.0000e+00]]]])
42
+ ```
43
+
44
+ Parameters:
45
+ is_causal (`bool`):
46
+ Whether the attention mask should be a uni-directional (causal) or bi-directional mask.
47
+
48
+ sliding_window (`int`, *optional*):
49
+ Optionally, sliding window masks can be created if `sliding_window` is set to a positive integer.
50
+ """
51
+
52
+ is_causal: bool
53
+ sliding_window: int
54
+
55
+ def __init__(self, is_causal: bool, sliding_window: Optional[int] = None):
56
+ self.is_causal = is_causal
57
+ self.sliding_window = sliding_window
58
+
59
+ if self.sliding_window is not None and self.sliding_window <= 0:
60
+ raise ValueError(
61
+ f"Make sure that when passing `sliding_window` that its value is a strictly positive integer, not `{self.sliding_window}`"
62
+ )
63
+
64
+ def to_causal_4d(
65
+ self,
66
+ batch_size: int,
67
+ query_length: int,
68
+ key_value_length: int,
69
+ dtype: torch.dtype,
70
+ device: Union[torch.device, "str"] = "cpu",
71
+ ) -> Optional[torch.Tensor]:
72
+ """
73
+ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative
74
+ bias to upper right hand triangular matrix (causal mask).
75
+ """
76
+ if not self.is_causal:
77
+ raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.")
78
+
79
+ # If shape is not cached, create a new causal mask and cache it
80
+ input_shape = (batch_size, query_length)
81
+ past_key_values_length = key_value_length - query_length
82
+
83
+ # create causal mask
84
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
85
+ causal_4d_mask = None
86
+ if input_shape[-1] > 1 or self.sliding_window is not None:
87
+ causal_4d_mask = self._make_causal_mask(
88
+ input_shape,
89
+ dtype,
90
+ device=device,
91
+ past_key_values_length=past_key_values_length,
92
+ sliding_window=self.sliding_window,
93
+ )
94
+
95
+ return causal_4d_mask
96
+
97
+ def to_4d(
98
+ self,
99
+ attention_mask_2d: torch.Tensor,
100
+ query_length: int,
101
+ dtype: torch.dtype,
102
+ key_value_length: Optional[int] = None,
103
+ ) -> torch.Tensor:
104
+ """
105
+ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length,
106
+ key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is
107
+ causal, a causal mask will be added.
108
+ """
109
+ input_shape = (attention_mask_2d.shape[0], query_length)
110
+
111
+ # create causal mask
112
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
113
+ causal_4d_mask = None
114
+ if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal:
115
+ if key_value_length is None:
116
+ raise ValueError(
117
+ "This attention mask converter is causal. Make sure to pass `key_value_length` to correctly create a causal mask."
118
+ )
119
+
120
+ past_key_values_length = key_value_length - query_length
121
+ causal_4d_mask = self._make_causal_mask(
122
+ input_shape,
123
+ dtype,
124
+ device=attention_mask_2d.device,
125
+ past_key_values_length=past_key_values_length,
126
+ sliding_window=self.sliding_window,
127
+ )
128
+ elif self.sliding_window is not None:
129
+ raise NotImplementedError("Sliding window is currently only implemented for causal masking")
130
+
131
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
132
+ expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to(
133
+ attention_mask_2d.device
134
+ )
135
+
136
+ if causal_4d_mask is not None:
137
+ expanded_attn_mask = causal_4d_mask.masked_fill(expanded_attn_mask.bool(), torch.finfo(dtype).min)
138
+
139
+ # expanded_attn_mask + causal_4d_mask can cause some overflow
140
+ expanded_4d_mask = expanded_attn_mask
141
+
142
+ return expanded_4d_mask
143
+
144
+ @staticmethod
145
+ def _make_causal_mask(
146
+ input_ids_shape: torch.Size,
147
+ dtype: torch.dtype,
148
+ device: torch.device,
149
+ past_key_values_length: int = 0,
150
+ sliding_window: Optional[int] = None,
151
+ ):
152
+ """
153
+ Make a causal mask used for uni-directional (causal) self-attention.
154
+ """
155
+ bsz, tgt_len = input_ids_shape
156
+ mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
157
+ mask_cond = torch.arange(mask.size(-1), device=device)
158
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
159
+
160
+ mask = mask.to(dtype)
161
+
162
+ if past_key_values_length > 0:
163
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
164
+
165
+ # add lower triangular sliding window mask if necessary
166
+ if sliding_window is not None:
167
+ diagonal = past_key_values_length - sliding_window - 1
168
+
169
+ context_mask = torch.tril(torch.ones_like(mask, dtype=torch.bool), diagonal=diagonal)
170
+ mask.masked_fill_(context_mask, torch.finfo(dtype).min)
171
+
172
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
173
+
174
+ @staticmethod
175
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
176
+ """
177
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
178
+ """
179
+ bsz, src_len = mask.size()
180
+ tgt_len = tgt_len if tgt_len is not None else src_len
181
+
182
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
183
+
184
+ inverted_mask = 1.0 - expanded_mask
185
+
186
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
187
+
188
+ @staticmethod
189
+ def _unmask_unattended(
190
+ expanded_mask: torch.FloatTensor,
191
+ min_dtype: float,
192
+ ):
193
+ # fmt: off
194
+ """
195
+ Attend to all tokens in masked rows from the expanded attention mask, for example the relevant first rows when
196
+ using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
197
+ Details: https://github.com/pytorch/pytorch/issues/110213
198
+
199
+ `expanded_mask` is [bsz, num_masks, tgt_seq_len, src_seq_len] or [bsz, tgt_seq_len, src_seq_len].
200
+ `attention_mask` is [bsz, src_seq_len].
201
+
202
+ The dimension num_masks of `expanded_mask` is most often 1, but it can also be the number of heads in the case of alibi attention bias.
203
+
204
+ For example, if `expanded_mask` is (e.g. here left-padding case)
205
+ ```
206
+ [[[[0, 0, 0],
207
+ [0, 0, 0],
208
+ [0, 0, 1]]],
209
+ [[[1, 0, 0],
210
+ [1, 1, 0],
211
+ [1, 1, 1]]],
212
+ [[[0, 0, 0],
213
+ [0, 1, 0],
214
+ [0, 1, 1]]]]
215
+ ```
216
+ then the modified `expanded_mask` will be
217
+ ```
218
+ [[[[1, 1, 1], <-- modified
219
+ [1, 1, 1], <-- modified
220
+ [0, 0, 1]]],
221
+ [[[1, 0, 0],
222
+ [1, 1, 0],
223
+ [1, 1, 1]]],
224
+ [[[1, 1, 1], <-- modified
225
+ [0, 1, 0],
226
+ [0, 1, 1]]]]
227
+ ```
228
+ """
229
+ # fmt: on
230
+ if expanded_mask.dtype == torch.bool:
231
+ raise ValueError(
232
+ "AttentionMaskConverter._unmask_unattended expects a float `expanded_mask`, got a BoolTensor."
233
+ )
234
+
235
+ return expanded_mask.mul(~torch.all(expanded_mask == min_dtype, dim=-1, keepdim=True))
236
+
237
+ @staticmethod
238
+ def _ignore_causal_mask_sdpa(
239
+ attention_mask: Optional[torch.Tensor],
240
+ inputs_embeds: torch.Tensor,
241
+ past_key_values_length: int,
242
+ sliding_window: Optional[int] = None,
243
+ is_training: bool = False,
244
+ ) -> bool:
245
+ """
246
+ Detects whether the optional user-specified attention_mask and the automatically created causal mask can be ignored when PyTorch's SDPA is used, relying instead on SDPA's `is_causal` argument.
247
+
248
+ In case no token is masked in the `attention_mask` argument, if `query_length == 1` or
249
+ `key_value_length == query_length`, we instead rely on SDPA's `is_causal` argument to use causal/non-causal masks,
250
+ allowing to dispatch to the flash attention kernel (that can otherwise not be used if a custom `attn_mask` is passed).
251
+ """
252
+
253
+ _, query_length = inputs_embeds.shape[0], inputs_embeds.shape[1]
254
+ key_value_length = query_length + past_key_values_length
255
+
256
+ is_tracing = (
257
+ torch.jit.is_tracing()
258
+ or isinstance(inputs_embeds, torch.fx.Proxy)
259
+ or (hasattr(torch, "_dynamo") and torch._dynamo.is_compiling())
260
+ )
261
+
262
+ ignore_causal_mask = False
263
+
264
+ if attention_mask is None:
265
+ # TODO: When tracing with TorchDynamo with fullgraph=True, the model is recompiled depending on the input shape, thus SDPA's `is_causal` argument is rightfully updated (see https://gist.github.com/fxmarty/1313f39037fc1c112508989628c57363). However, when using `torch.export` or
266
+ # or `torch.onnx.dynamo_export`, we must pass an example input, and `is_causal` behavior is hard-coded. If a user exports a model with q_len > 1, the exported model will hard-code `is_causal=True` which is in general wrong (see https://github.com/pytorch/pytorch/issues/108108).
267
+ # Thus, we only set `ignore_causal_mask = True` if the model is set to training.
268
+ #
269
+ # Besides, jit.trace can not handle the `q_len > 1` condition for `is_causal` ("TypeError: scaled_dot_product_attention(): argument 'is_causal' must be bool, not Tensor").
270
+ if (
271
+ (is_training or not is_tracing)
272
+ and (query_length == 1 or key_value_length == query_length)
273
+ and (sliding_window is None or key_value_length < sliding_window)
274
+ ):
275
+ ignore_causal_mask = True
276
+ elif sliding_window is None or key_value_length < sliding_window:
277
+ if len(attention_mask.shape) == 4:
278
+ return False
279
+ elif (is_training or not is_tracing) and torch.all(attention_mask == 1):
280
+ if query_length == 1 or key_value_length == query_length:
281
+ # For query_length == 1, causal attention and bi-directional attention are the same.
282
+ ignore_causal_mask = True
283
+
284
+ # Unfortunately, for query_length > 1 and key_value_length != query_length, we cannot generally ignore the attention mask, as SDPA causal mask generation
285
+ # may be wrong. We will set `is_causal=False` in SDPA and rely on Transformers attention_mask instead, hence not setting it to None here.
286
+ # Reference: https://github.com/pytorch/pytorch/issues/108108
287
+ # TODO: maybe revisit this with https://github.com/pytorch/pytorch/pull/114823 in PyTorch 2.3.
288
+
289
+ return ignore_causal_mask
290
+
291
+
292
+ def _prepare_4d_causal_attention_mask(
293
+ attention_mask: Optional[torch.Tensor],
294
+ input_shape: Union[torch.Size, Tuple, List],
295
+ inputs_embeds: torch.Tensor,
296
+ past_key_values_length: int,
297
+ sliding_window: Optional[int] = None,
298
+ ):
299
+ """
300
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
301
+ `(batch_size, key_value_length)`
302
+
303
+ Args:
304
+ attention_mask (`torch.Tensor` or `None`):
305
+ A 2D attention mask of shape `(batch_size, key_value_length)`
306
+ input_shape (`tuple(int)` or `list(int)` or `torch.Size`):
307
+ The input shape should be a tuple that defines `(batch_size, query_length)`.
308
+ inputs_embeds (`torch.Tensor`):
309
+ The embedded inputs as a torch Tensor.
310
+ past_key_values_length (`int`):
311
+ The length of the key value cache.
312
+ sliding_window (`int`, *optional*):
313
+ If the model uses windowed attention, a sliding window should be passed.
314
+ """
315
+ attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)
316
+
317
+ key_value_length = input_shape[-1] + past_key_values_length
318
+
319
+ # 4d mask is passed through the layers
320
+ if attention_mask is not None and len(attention_mask.shape) == 2:
321
+ attention_mask = attn_mask_converter.to_4d(
322
+ attention_mask, input_shape[-1], key_value_length=key_value_length, dtype=inputs_embeds.dtype
323
+ )
324
+ elif attention_mask is not None and len(attention_mask.shape) == 4:
325
+ expected_shape = (input_shape[0], 1, input_shape[1], key_value_length)
326
+ if tuple(attention_mask.shape) != expected_shape:
327
+ raise ValueError(
328
+ f"Incorrect 4D attention_mask shape: {tuple(attention_mask.shape)}; expected: {expected_shape}."
329
+ )
330
+ else:
331
+ # if the 4D mask has correct shape - invert it and fill with negative infinity
332
+ inverted_mask = 1.0 - attention_mask
333
+ attention_mask = inverted_mask.masked_fill(
334
+ inverted_mask.to(torch.bool), torch.finfo(inputs_embeds.dtype).min
335
+ )
336
+ else:
337
+ attention_mask = attn_mask_converter.to_causal_4d(
338
+ input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device
339
+ )
340
+
341
+ return attention_mask
342
+
343
+
344
+ # Adapted from _prepare_4d_causal_attention_mask
345
+ def _prepare_4d_causal_attention_mask_for_sdpa(
346
+ attention_mask: Optional[torch.Tensor],
347
+ input_shape: Union[torch.Size, Tuple, List],
348
+ inputs_embeds: torch.Tensor,
349
+ past_key_values_length: int,
350
+ sliding_window: Optional[int] = None,
351
+ ):
352
+ """
353
+ Prepares the correct `attn_mask` argument to be used by `torch.nn.functional.scaled_dot_product_attention`.
354
+
355
+ In case no token is masked in the `attention_mask` argument, we simply set it to `None` for the cases `query_length == 1` and
356
+ `key_value_length == query_length`, and rely instead on SDPA `is_causal` argument to use causal/non-causal masks,
357
+ allowing to dispatch to the flash attention kernel (that can otherwise not be used if a custom `attn_mask` is passed).
358
+ """
359
+ attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)
360
+
361
+ key_value_length = input_shape[-1] + past_key_values_length
362
+
363
+ # torch.jit.trace, symbolic_trace and torchdynamo with fullgraph=True are unable to capture the controlflow `is_causal=attention_mask is None and q_len > 1`
364
+ # used as an SDPA argument. We keep compatibility with these tracing tools by always using SDPA's `attn_mask` argument in case we are tracing.
365
+ # TODO: For dynamo, rather use a check on fullgraph=True once this is possible (https://github.com/pytorch/pytorch/pull/120400).
366
+ is_tracing = (
367
+ torch.jit.is_tracing()
368
+ or isinstance(inputs_embeds, torch.fx.Proxy)
369
+ or (hasattr(torch, "_dynamo") and torch._dynamo.is_compiling())
370
+ )
371
+
372
+ ignore_causal_mask = AttentionMaskConverter._ignore_causal_mask_sdpa(
373
+ attention_mask=attention_mask,
374
+ inputs_embeds=inputs_embeds,
375
+ past_key_values_length=past_key_values_length,
376
+ sliding_window=sliding_window,
377
+ )
378
+
379
+ if ignore_causal_mask:
380
+ expanded_4d_mask = None
381
+ elif attention_mask is None:
382
+ expanded_4d_mask = attn_mask_converter.to_causal_4d(
383
+ input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device
384
+ )
385
+ else:
386
+ if attention_mask.dim() == 4:
387
+ # in this case we assume that the mask comes already in inverted form and requires no inversion or slicing
388
+ if attention_mask.max() != 0:
389
+ raise ValueError("Custom 4D attention mask should be passed in inverted form with max==0`")
390
+ expanded_4d_mask = attention_mask
391
+ else:
392
+ expanded_4d_mask = attn_mask_converter.to_4d(
393
+ attention_mask,
394
+ input_shape[-1],
395
+ dtype=inputs_embeds.dtype,
396
+ key_value_length=key_value_length,
397
+ )
398
+
399
+ # Attend to all tokens in masked rows from the causal_mask, for example the relevant first rows when
400
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
401
+ # Details: https://github.com/pytorch/pytorch/issues/110213
402
+ if not is_tracing and expanded_4d_mask.device.type == "cuda":
403
+ expanded_4d_mask = AttentionMaskConverter._unmask_unattended(
404
+ expanded_4d_mask, min_dtype=torch.finfo(inputs_embeds.dtype).min
405
+ )
406
+
407
+ return expanded_4d_mask
408
+
409
+
410
+ def _prepare_4d_attention_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
411
+ """
412
+ Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
413
+ `(batch_size, key_value_length)`
414
+
415
+ Args:
416
+ mask (`torch.Tensor`):
417
+ A 2D attention mask of shape `(batch_size, key_value_length)`
418
+ dtype (`torch.dtype`):
419
+ The torch dtype the created mask shall have.
420
+ tgt_len (`int`):
421
+ The target length or query length the created mask shall have.
422
+ """
423
+ return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
424
+
425
+
426
+ def _prepare_4d_attention_mask_for_sdpa(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
427
+ """
428
+ Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
429
+ `(batch_size, key_value_length)`
430
+
431
+ Args:
432
+ mask (`torch.Tensor`):
433
+ A 2D attention mask of shape `(batch_size, key_value_length)`
434
+ dtype (`torch.dtype`):
435
+ The torch dtype the created mask shall have.
436
+ tgt_len (`int`):
437
+ The target length or query length the created mask shall have.
438
+ """
439
+ _, key_value_length = mask.shape
440
+ tgt_len = tgt_len if tgt_len is not None else key_value_length
441
+
442
+ is_tracing = (
443
+ torch.jit.is_tracing()
444
+ or isinstance(mask, torch.fx.Proxy)
445
+ or (hasattr(torch, "_dynamo") and torch._dynamo.is_compiling())
446
+ )
447
+
448
+ # torch.jit.trace, symbolic_trace and torchdynamo with fullgraph=True are unable to capture data-dependent controlflows.
449
+ if not is_tracing and torch.all(mask == 1):
450
+ return None
451
+ else:
452
+ return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
453
+
454
+
455
+ def _create_4d_causal_attention_mask(
456
+ input_shape: Union[torch.Size, Tuple, List],
457
+ dtype: torch.dtype,
458
+ device: torch.device,
459
+ past_key_values_length: int = 0,
460
+ sliding_window: Optional[int] = None,
461
+ ) -> Optional[torch.Tensor]:
462
+ """
463
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)`
464
+
465
+ Args:
466
+ input_shape (`tuple(int)` or `list(int)` or `torch.Size`):
467
+ The input shape should be a tuple that defines `(batch_size, query_length)`.
468
+ dtype (`torch.dtype`):
469
+ The torch dtype the created mask shall have.
470
+ device (`int`):
471
+ The torch device the created mask shall have.
472
+ sliding_window (`int`, *optional*):
473
+ If the model uses windowed attention, a sliding window should be passed.
474
+ """
475
+ attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)
476
+
477
+ key_value_length = past_key_values_length + input_shape[-1]
478
+ attention_mask = attn_mask_converter.to_causal_4d(
479
+ input_shape[0], input_shape[-1], key_value_length, dtype=dtype, device=device
480
+ )
481
+
482
+ return attention_mask
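
As a usage aside (not part of the diff itself), the mask helpers defined in this file can be exercised in isolation. The sketch below is illustrative and assumes the vendored module name used in this repo is importable from the working directory.

```python
# Illustrative sketch: converting a 2D padding mask into the 4D additive causal mask
# produced by the helper above. Assumes this repo's vendored module is on sys.path.
import torch
from transformers_4_44_2__modeling_attn_mask_utils import _prepare_4d_causal_attention_mask

batch_size, seq_len, hidden = 1, 5, 8
inputs_embeds = torch.zeros(batch_size, seq_len, hidden)
attention_mask_2d = torch.tensor([[0, 0, 1, 1, 1]])  # left-padded sequence

mask_4d = _prepare_4d_causal_attention_mask(
    attention_mask_2d, (batch_size, seq_len), inputs_embeds, past_key_values_length=0
)
print(mask_4d.shape)  # torch.Size([1, 1, 5, 5]); padded/future positions hold large negative values
```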
transformers_4_44_2__modeling_flash_attention_utils_backward_compat.py ADDED
@@ -0,0 +1,348 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import inspect
17
+ import os
18
+ from typing import Optional, Tuple, Union
19
+
20
+
21
+ import torch
22
+ import torch.nn.functional as F
23
+
24
+ from functools import lru_cache
25
+ import importlib.metadata
26
+ import importlib.util
27
+ from packaging import version
28
+
29
+ from transformers.utils import is_flash_attn_2_available
30
+
31
+
32
+ if is_flash_attn_2_available():
33
+ try:
34
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
35
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
36
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
37
+ except ImportError:
38
+ raise "Unable to import flash_attn"
39
+
40
+
41
+ def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[Tuple[bool, str], bool]:
42
+ # Check if the package spec exists and grab its version to avoid importing a local directory
43
+ package_exists = importlib.util.find_spec(pkg_name) is not None
44
+ package_version = "N/A"
45
+ if package_exists:
46
+ try:
47
+ # Primary method to get the package version
48
+ package_version = importlib.metadata.version(pkg_name)
49
+ except importlib.metadata.PackageNotFoundError:
50
+ # Fallback method: Only for "torch" and versions containing "dev"
51
+ if pkg_name == "torch":
52
+ try:
53
+ package = importlib.import_module(pkg_name)
54
+ temp_version = getattr(package, "__version__", "N/A")
55
+ # Check if the version contains "dev"
56
+ if "dev" in temp_version:
57
+ package_version = temp_version
58
+ package_exists = True
59
+ else:
60
+ package_exists = False
61
+ except ImportError:
62
+ # If the package can't be imported, it's not available
63
+ package_exists = False
64
+ else:
65
+ # For packages other than "torch", don't attempt the fallback and set as not available
66
+ package_exists = False
67
+ if return_version:
68
+ return package_exists, package_version
69
+ else:
70
+ return package_exists
71
+
72
+
73
+ @lru_cache()
74
+ def is_flash_attn_greater_or_equal(library_version: str):
75
+ if not _is_package_available("flash_attn"):
76
+ return False
77
+
78
+ return version.parse(importlib.metadata.version("flash_attn")) >= version.parse(library_version)
79
+
80
+
81
+ def _get_unpad_data(attention_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, int]:
82
+ """
83
+ Retrieves indexing data required to repad unpadded (ragged) tensors.
84
+
85
+ Arguments:
86
+ attention_mask (`torch.Tensor`):
87
+ Boolean or int tensor of shape (batch_size, sequence_length), 1 means valid and 0 means not valid.
88
+
89
+ Return:
90
+ indices (`torch.Tensor`):
91
+ The indices of non-masked tokens from the flattened input sequence.
92
+ cu_seqlens (`torch.Tensor`):
93
+ The cumulative sequence lengths, used to index into ragged (unpadded) tensors. `cu_seqlens` shape is (batch_size + 1,).
94
+ max_seqlen_in_batch (`int`):
95
+ Maximum sequence length in batch.
96
+ """
97
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
98
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
99
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
100
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
101
+ return (
102
+ indices,
103
+ cu_seqlens,
104
+ max_seqlen_in_batch,
105
+ )
106
+
107
+
108
+ def _upad_input(
109
+ query_layer: torch.Tensor,
110
+ key_layer: torch.Tensor,
111
+ value_layer: torch.Tensor,
112
+ attention_mask: torch.Tensor,
113
+ query_length: int,
114
+ ):
115
+ """
116
+ Unpads query, key, and value tensors, using a single dimension for all tokens even though they belong to different sequences in the batch.
117
+
118
+ This function is used instead of `flash_attn.bert_padding.unpad_input` in order to avoid the recomputation of the same intermediary
119
+ tensors for query, key, value tensors.
120
+
121
+ Arguments:
122
+ query_layer (`torch.Tensor`):
123
+ Query state with padding. Shape: (batch_size, query_length, num_heads, head_dim).
124
+ key_layer (`torch.Tensor`):
125
+ Key state with padding. Shape: (batch_size, kv_seq_len, num_key_value_heads, head_dim).
126
+ value_layer (`torch.Tensor`):
127
+ Value state with padding. Shape: (batch_size, kv_seq_len, num_key_value_heads, head_dim).
128
+ attention_mask (`torch.Tensor`):
129
+ Boolean or int tensor of shape (batch_size, sequence_length), 1 means valid and 0 means not valid.
130
+ query_length (`int`):
131
+ Target length.
132
+
133
+ Return:
134
+ query_layer (`torch.Tensor`):
135
+ Query state without padding. Shape: (total_target_length, num_heads, head_dim).
136
+ key_layer (`torch.Tensor`):
137
+ Key state without padding. Shape: (total_source_length, num_key_value_heads, head_dim).
138
+ value_layer (`torch.Tensor`):
139
+ Value state without padding. Shape: (total_source_length, num_key_value_heads, head_dim).
140
+ indices_q (`torch.Tensor`):
141
+ The indices of non-masked tokens from the flattened input target sequence.
142
+ (cu_seqlens_q, cu_seqlens_k) (`Tuple[int]`):
143
+ The cumulative sequence lengths for the target (query) and source (key, value), used to index into ragged (unpadded) tensors. `cu_seqlens` shape is (batch_size + 1,).
144
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k) (`Tuple[int]`):
145
+ Maximum sequence length in batch (`max_seqlen_in_batch_q` for the target sequence i.e. query, `max_seqlen_in_batch_k` for the source sequence i.e. key/value).
146
+ """
147
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
148
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
149
+
150
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k)
151
+ value_layer = index_first_axis(
152
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
153
+ )
154
+ if query_length == kv_seq_len:
155
+ query_layer = index_first_axis(query_layer.reshape(batch_size * kv_seq_len, -1, head_dim), indices_k)
156
+ cu_seqlens_q = cu_seqlens_k
157
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
158
+ indices_q = indices_k
159
+ elif query_length == 1:
160
+ max_seqlen_in_batch_q = 1
161
+ cu_seqlens_q = torch.arange(
162
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
163
+ ) # There is a memcpy here, that is very bad.
164
+ indices_q = cu_seqlens_q[:-1]
165
+ query_layer = query_layer.squeeze(1)
166
+ else:
167
+ # The -q_len: slice assumes left padding.
168
+ attention_mask = attention_mask[:, -query_length:]
169
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
170
+
171
+ return (
172
+ query_layer,
173
+ key_layer,
174
+ value_layer,
175
+ indices_q,
176
+ (cu_seqlens_q, cu_seqlens_k),
177
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
178
+ )
179
+
180
+
181
+ def prepare_fa2_from_position_ids(query, key, value, position_ids):
182
+ """
183
+ This function returns necessary arguments to call `flash_attn_varlen_func`.
184
+ All three query, key, value states will be flattened.
185
+ Cumulative lengths of each example in the batch will be extracted from position_ids.
186
+
187
+ NOTE: ideally, cumulative lengths should be prepared at the data collator stage
188
+
189
+ Arguments:
190
+ query (`torch.Tensor`):
191
+ Query state with padding. Shape: (batch_size, query_length, num_heads, head_dim).
192
+ key (`torch.Tensor`):
193
+ Key state with padding. Shape: (batch_size, kv_seq_len, num_key_value_heads, head_dim).
194
+ value (`torch.Tensor`):
195
+ Value state with padding. Shape: (batch_size, kv_seq_len, num_key_value_heads, head_dim).
196
+ position_ids (`torch.Tensor`):
197
+ Indices of positions of each input sequence token, of shape (batch_size, sequence_length).
198
+
199
+ Return:
200
+ query (`torch.Tensor`):
201
+ Query state without padding. Shape: (total_target_length, num_heads, head_dim).
202
+ key (`torch.Tensor`):
203
+ Key state with padding. Shape: (total_source_length, num_key_value_heads, head_dim).
204
+ value (`torch.Tensor`):
205
+ Value state with padding. Shape: (total_source_length, num_key_value_heads, head_dim).
206
+ indices_q (`torch.Tensor`):
207
+ The indices of non-masked tokens from the flattened input target sequence.
208
+ (cu_seqlens_q, cu_seqlens_k) (`Tuple[int]`):
209
+ The cumulative sequence lengths for the target (query) and source (key, value), used to index into ragged (unpadded) tensors. `cu_seqlens` shape is (batch_size + 1,).
210
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k) (`Tuple[int]`):
211
+ Maximum sequence length in batch (`max_seqlen_in_batch_q` for the target sequence i.e. query, `max_seqlen_in_batch_k` for the source sequence i.e. key/value).
212
+ """
213
+ query = query.view(-1, query.size(-2), query.size(-1))
214
+ key = key.view(-1, key.size(-2), key.size(-1))
215
+ value = value.view(-1, value.size(-2), value.size(-1))
216
+ position_ids = position_ids.flatten()
217
+ indices_q = torch.arange(position_ids.size(0), device=position_ids.device, dtype=torch.int32)
218
+
219
+ cu_seq_lens = torch.cat(
220
+ (
221
+ indices_q[position_ids == 0],
222
+ torch.tensor(position_ids.size(), device=position_ids.device, dtype=torch.int32),
223
+ )
224
+ )
225
+
226
+ max_length = position_ids.max() + 1
227
+
228
+ return (query, key, value, indices_q, (cu_seq_lens, cu_seq_lens), (max_length, max_length))
229
+
230
+
231
+ def _flash_attention_forward(
232
+ query_states: torch.Tensor,
233
+ key_states: torch.Tensor,
234
+ value_states: torch.Tensor,
235
+ attention_mask: torch.Tensor,
236
+ query_length: int,
237
+ is_causal: bool,
238
+ dropout: float = 0.0,
239
+ position_ids: Optional[torch.Tensor] = None,
240
+ softmax_scale: Optional[float] = None,
241
+ sliding_window: Optional[int] = None,
242
+ use_top_left_mask: bool = False,
243
+ softcap: Optional[float] = None,
244
+ deterministic: bool = None,
245
+ ):
246
+ """
247
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
248
+ first unpad the input, then computes the attention scores and pad the final attention scores.
249
+
250
+ Args:
251
+ query_states (`torch.Tensor`):
252
+ Input query states to be passed to Flash Attention API
253
+ key_states (`torch.Tensor`):
254
+ Input key states to be passed to Flash Attention API
255
+ value_states (`torch.Tensor`):
256
+ Input value states to be passed to Flash Attention API
257
+ attention_mask (`torch.Tensor`):
258
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
259
+ position of padding tokens and 1 for the position of non-padding tokens.
260
+ dropout (`float`):
261
+ Attention dropout
262
+ softmax_scale (`float`, *optional*):
263
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
264
+ use_top_left_mask (`bool`, defaults to `False`):
265
+ flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which became the default for flash_attn>=2.1. This attribute is used to handle this difference.
266
+ softcap (`float`, *optional*):
267
+ Softcap for the attention logits, used e.g. in gemma2.
268
+ deterministic (`bool`, *optional*):
269
+ Determines if the deterministic option introduced in flash_attn>=2.4.1 is enabled.
270
+ """
271
+ if not use_top_left_mask:
272
+ causal = is_causal
273
+ else:
274
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__.
275
+ causal = is_causal and query_length != 1
276
+
277
+ # Assuming 4D tensors, key_states.shape[1] is the key/value sequence length (source length).
278
+ use_sliding_windows = (
279
+ _flash_supports_window_size and sliding_window is not None and key_states.shape[1] > sliding_window
280
+ )
281
+ flash_kwargs = {"window_size": (sliding_window, sliding_window)} if use_sliding_windows else {}
282
+
283
+ if is_flash_attn_greater_or_equal("2.4.1"):
284
+ if deterministic is None:
285
+ deterministic = os.environ.get("FLASH_ATTENTION_DETERMINISTIC", "0") == "1"
286
+ flash_kwargs["deterministic"] = deterministic
287
+
288
+ if softcap is not None:
289
+ flash_kwargs["softcap"] = softcap
290
+
291
+ # Contains at least one padding token in the sequence
292
+ if attention_mask is not None:
293
+ batch_size = query_states.shape[0]
294
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = _upad_input(
295
+ query_states, key_states, value_states, attention_mask, query_length
296
+ )
297
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
298
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
299
+
300
+ attn_output_unpad = flash_attn_varlen_func(
301
+ query_states,
302
+ key_states,
303
+ value_states,
304
+ cu_seqlens_q=cu_seqlens_q,
305
+ cu_seqlens_k=cu_seqlens_k,
306
+ max_seqlen_q=max_seqlen_in_batch_q,
307
+ max_seqlen_k=max_seqlen_in_batch_k,
308
+ dropout_p=dropout,
309
+ softmax_scale=softmax_scale,
310
+ causal=causal,
311
+ **flash_kwargs,
312
+ )
313
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
314
+
315
+ # If position_ids is provided and not all examples contain a single sequence: if the tensor is monotonically increasing,
316
+ # we probably have one sequence; otherwise the batch is packed. Additionally, check that we are in the pre-fill/training stage.
317
+ # Use `flash_attn_varlen_func` to prevent cross-example attention and also allow a padding-free approach.
318
+ elif position_ids is not None and query_length != 1 and not (torch.diff(position_ids, dim=-1) >= 0).all():
319
+ batch_size = query_states.size(0)
320
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = prepare_fa2_from_position_ids(
321
+ query_states, key_states, value_states, position_ids
322
+ )
323
+
324
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
325
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
326
+
327
+ attn_output = flash_attn_varlen_func(
328
+ query_states,
329
+ key_states,
330
+ value_states,
331
+ cu_seqlens_q=cu_seqlens_q,
332
+ cu_seqlens_k=cu_seqlens_k,
333
+ max_seqlen_q=max_seqlen_in_batch_q,
334
+ max_seqlen_k=max_seqlen_in_batch_k,
335
+ dropout_p=dropout,
336
+ softmax_scale=softmax_scale,
337
+ causal=causal,
338
+ **flash_kwargs,
339
+ )
340
+
341
+ attn_output = attn_output.view(batch_size, -1, attn_output.size(-2), attn_output.size(-1))
342
+
343
+ else:
344
+ attn_output = flash_attn_func(
345
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal, **flash_kwargs
346
+ )
347
+
348
+ return attn_output
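
As a small aside, `_get_unpad_data` above is the easiest piece of this file to sanity-check on its own, since it only needs PyTorch. The sketch below is illustrative and assumes the vendored module name used in this repo is importable.

```python
# Illustrative sketch: extracting unpadding metadata from a padding mask with the helper above.
# Only torch is exercised here; the flash-attn kernels themselves are not called.
import torch
from transformers_4_44_2__modeling_flash_attention_utils_backward_compat import _get_unpad_data

attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # valid lengths 3 and 2
indices, cu_seqlens, max_seqlen = _get_unpad_data(attention_mask)
print(indices)     # positions of the 5 valid tokens in the flattened batch
print(cu_seqlens)  # tensor([0, 3, 5], dtype=torch.int32)
print(max_seqlen)  # 3
```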
transformers_4_44_2__modeling_outputs.py ADDED
The diff for this file is too large to render. See raw diff
 
transformers_4_44_2__modeling_rope_utils.py ADDED
@@ -0,0 +1,559 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from typing import Optional, Tuple
17
+
18
+ from transformers.configuration_utils import PretrainedConfig
19
+ from transformers.utils import is_torch_available, logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ if is_torch_available():
26
+ import torch
27
+
28
+
29
+ def _compute_default_rope_parameters(
30
+ config: Optional[PretrainedConfig] = None,
31
+ device: Optional["torch.device"] = None,
32
+ seq_len: Optional[int] = None,
33
+ **rope_kwargs,
34
+ ) -> Tuple["torch.Tensor", float]:
35
+ """
36
+ Computes the inverse frequencies according to the original RoPE implementation
37
+ Args:
38
+ config ([`~transformers.PretrainedConfig`]):
39
+ The model configuration.
40
+ device (`torch.device`):
41
+ The device to use for initialization of the inverse frequencies.
42
+ seq_len (`int`, *optional*):
43
+ The current sequence length. Unused for this type of RoPE.
44
+ rope_kwargs (`Dict`, *optional*):
45
+ BC compatibility with the previous RoPE class instantiation, will be removed in v4.45.
46
+ Returns:
47
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
48
+ post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
49
+ """
50
+ if config is not None and len(rope_kwargs) > 0:
51
+ raise ValueError(
52
+ "Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in "
53
+ f"`_compute_default_rope_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}"
54
+ )
55
+ if len(rope_kwargs) > 0:
56
+ base = rope_kwargs["base"]
57
+ dim = rope_kwargs["dim"]
58
+ elif config is not None:
59
+ base = config.rope_theta
60
+ partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0
61
+ head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
62
+ dim = int(head_dim * partial_rotary_factor)
63
+
64
+ attention_factor = 1.0 # Unused in this type of RoPE
65
+
66
+ # Compute the inverse frequencies
67
+ inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float().to(device) / dim))
68
+ return inv_freq, attention_factor
69
+
70
+
71
+ def _compute_linear_scaling_rope_parameters(
72
+ config: Optional[PretrainedConfig] = None,
73
+ device: Optional["torch.device"] = None,
74
+ seq_len: Optional[int] = None,
75
+ **rope_kwargs,
76
+ ) -> Tuple["torch.Tensor", float]:
77
+ """
78
+ Computes the inverse frequencies with linear scaling. Credits to the Reddit user /u/kaiokendev
79
+ Args:
80
+ config ([`~transformers.PretrainedConfig`]):
81
+ The model configuration.
82
+ device (`torch.device`):
83
+ The device to use for initialization of the inverse frequencies.
84
+ seq_len (`int`, *optional*):
85
+ The current sequence length. Unused for this type of RoPE.
86
+ rope_kwargs (`Dict`, *optional*):
87
+ BC compatibility with the previous RoPE class instantiation, will be removed in v4.45.
88
+ Returns:
89
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
90
+ post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
91
+ """
92
+ if config is not None and len(rope_kwargs) > 0:
93
+ raise ValueError(
94
+ "Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in "
95
+ f"`_compute_linear_scaling_rope_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}"
96
+ )
97
+ if len(rope_kwargs) > 0:
98
+ factor = rope_kwargs["factor"]
99
+ elif config is not None:
100
+ factor = config.rope_scaling["factor"]
101
+
102
+ # Gets the default RoPE parameters
103
+ inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len, **rope_kwargs)
104
+
105
+ # Then applies linear scaling to the frequencies.
106
+ # NOTE: originally, scaling was applied to the position_ids. However, we get `embs = inv_freq @ position_ids`, so
107
+ # applying scaling to the inverse frequencies is equivalent.
108
+ inv_freq /= factor
109
+ return inv_freq, attention_factor
110
+
111
+
112
+ def _compute_dynamic_ntk_parameters(
113
+ config: Optional[PretrainedConfig] = None,
114
+ device: Optional["torch.device"] = None,
115
+ seq_len: Optional[int] = None,
116
+ **rope_kwargs,
117
+ ) -> Tuple["torch.Tensor", float]:
118
+ """
119
+ Computes the inverse frequencies with NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla
120
+ Args:
121
+ config ([`~transformers.PretrainedConfig`]):
122
+ The model configuration.
123
+ device (`torch.device`):
124
+ The device to use for initialization of the inverse frequencies.
125
+ seq_len (`int`, *optional*):
126
+ The current sequence length, used to update the dynamic RoPE at inference time.
127
+ rope_kwargs (`Dict`, *optional*):
128
+ BC compatibility with the previous RoPE class instantiation, will be removed in v4.45.
129
+ Returns:
130
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
131
+ post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
132
+ """
133
+ # TODO (joao): use the new `original_max_position_embeddings` from rope_scaling
134
+ if config is not None and len(rope_kwargs) > 0:
135
+ raise ValueError(
136
+ "Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in "
137
+ f"`_compute_dynamic_ntk_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}"
138
+ )
139
+ if len(rope_kwargs) > 0:
140
+ base = rope_kwargs["base"]
141
+ dim = rope_kwargs["dim"]
142
+ max_position_embeddings = rope_kwargs["max_position_embeddings"]
143
+ factor = rope_kwargs["factor"]
144
+ elif config is not None:
145
+ base = config.rope_theta
146
+ partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0
147
+ head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
148
+ dim = int(head_dim * partial_rotary_factor)
149
+ max_position_embeddings = config.max_position_embeddings
150
+ factor = config.rope_scaling["factor"]
151
+
152
+ attention_factor = 1.0 # Unused in this type of RoPE
153
+
154
+ # seq_len: default to max_position_embeddings, e.g. at init time
155
+ seq_len = seq_len if seq_len is not None and seq_len > max_position_embeddings else max_position_embeddings
156
+
157
+ # Compute the inverse frequencies
158
+ base = base * ((factor * seq_len / max_position_embeddings) - (factor - 1)) ** (dim / (dim - 2))
159
+ inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float().to(device) / dim))
160
+ return inv_freq, attention_factor
161
+
162
+
163
+ def _compute_yarn_parameters(
164
+ config: PretrainedConfig, device: "torch.device", seq_len: Optional[int] = None, **rope_kwargs
165
+ ) -> Tuple["torch.Tensor", float]:
166
+ """
167
+ Computes the inverse frequencies with YaRN scaling. Please refer to the
168
+ [original paper](https://arxiv.org/abs/2309.00071)
169
+ Args:
170
+ config ([`~transformers.PretrainedConfig`]):
171
+ The model configuration.
172
+ device (`torch.device`):
173
+ The device to use for initialization of the inverse frequencies.
174
+ seq_len (`int`, *optional*):
175
+ The current sequence length. Unused for this type of RoPE.
176
+ rope_kwargs (`Dict`, *optional*):
177
+ BC compatibility with the previous RoPE class instantiation, will be removed in v4.45.
178
+ Returns:
179
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
180
+ post-processing scaling factor applied to the computed cos/sin.
181
+ """
182
+ # No need to keep BC with yarn, unreleased when this new pattern was created.
183
+ if len(rope_kwargs) > 0:
184
+ raise ValueError(
185
+ f"Unexpected arguments: `**rope_kwargs` should be unset in `_compute_yarn_parameters`, got {rope_kwargs}"
186
+ )
187
+
188
+ base = config.rope_theta
189
+ partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0
190
+ head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
191
+ dim = int(head_dim * partial_rotary_factor)
192
+ max_position_embeddings = config.max_position_embeddings
193
+ factor = config.rope_scaling["factor"]
194
+
195
+ # Sets the attention factor as suggested in the paper
196
+ attention_factor = config.rope_scaling.get("attention_factor")
197
+ if attention_factor is None:
198
+ attention_factor = 0.1 * math.log(factor) + 1.0
199
+
200
+ # Optional config options
201
+ # beta_fast/beta_slow: as suggested in the paper, default to 32/1 (correspondingly)
202
+ beta_fast = config.rope_scaling.get("beta_fast") or 32
203
+ beta_slow = config.rope_scaling.get("beta_slow") or 1
204
+
205
+ # Compute the inverse frequencies
206
+ def find_correction_dim(num_rotations, dim, base, max_position_embeddings):
207
+ """Inverse dimension formula to find the dimension based on the number of rotations"""
208
+ return (dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi))) / (2 * math.log(base))
209
+
210
+ def find_correction_range(low_rot, high_rot, dim, base, max_position_embeddings):
211
+ """Find dimension range bounds based on rotations"""
212
+ low = math.floor(find_correction_dim(low_rot, dim, base, max_position_embeddings))
213
+ high = math.ceil(find_correction_dim(high_rot, dim, base, max_position_embeddings))
214
+ return max(low, 0), min(high, dim - 1)
215
+
216
+ def linear_ramp_factor(min, max, dim):
217
+ if min == max:
218
+ max += 0.001 # Prevent singularity
219
+
220
+ linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
221
+ ramp_func = torch.clamp(linear_func, 0, 1)
222
+ return ramp_func
223
+
224
+ # Note on variable naming: "interpolation" comes from the original technique, where we interpolate the position IDs
225
+ # to expand the possible context length. In other words, interpolation = apply scaling factor.
226
+ pos_freqs = base ** (torch.arange(0, dim, 2).float().to(device) / dim)
227
+ inv_freq_extrapolation = 1.0 / pos_freqs
228
+ inv_freq_interpolation = 1.0 / (factor * pos_freqs)
229
+
230
+ low, high = find_correction_range(beta_fast, beta_slow, dim, base, max_position_embeddings)
231
+
232
+ # Get n-dimensional rotational scaling corrected for extrapolation
233
+ inv_freq_extrapolation_factor = 1 - linear_ramp_factor(low, high, dim // 2).float().to(device)
234
+ inv_freq = (
235
+ inv_freq_interpolation * (1 - inv_freq_extrapolation_factor)
236
+ + inv_freq_extrapolation * inv_freq_extrapolation_factor
237
+ )
238
+
239
+ return inv_freq, attention_factor
240
+
241
+
242
+ def _compute_longrope_parameters(
243
+ config: PretrainedConfig, device: "torch.device", seq_len: Optional[int] = None, **rope_kwargs
244
+ ) -> Tuple["torch.Tensor", float]:
245
+ """
246
+ Computes the inverse frequencies with LongRoPE scaling. Please refer to the
247
+ [original implementation](https://github.com/microsoft/LongRoPE)
248
+ Args:
249
+ config ([`~transformers.PretrainedConfig`]):
250
+ The model configuration.
251
+ device (`torch.device`):
252
+ The device to use for initialization of the inverse frequencies.
253
+ seq_len (`int`, *optional*):
254
+ The current sequence length. Unused for this type of RoPE.
255
+ rope_kwargs (`Dict`, *optional*):
256
+ BC compatibility with the previous RoPE class instantiation, will be removed in v4.45.
257
+ Returns:
258
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
259
+ post-processing scaling factor applied to the computed cos/sin.
260
+ """
261
+ # TODO (joao): use the new `original_max_position_embeddings` from rope_scaling
262
+ # No need to keep BC with longrope, unreleased when this new pattern was created.
263
+ if len(rope_kwargs) > 0:
264
+ raise ValueError(
265
+ "Unexpected arguments: `**rope_kwargs` should be unset in `_compute_longrope_parameters`, got "
266
+ f"{rope_kwargs}"
267
+ )
268
+
269
+ base = config.rope_theta
270
+ partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0
271
+ head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
272
+ dim = int(head_dim * partial_rotary_factor)
273
+ long_factor = config.rope_scaling["long_factor"]
274
+ short_factor = config.rope_scaling["short_factor"]
275
+ factor = config.rope_scaling.get("factor")
276
+ attention_factor = config.rope_scaling.get("attention_factor")
277
+
278
+ # NOTE: Phi3 (and potentially other models) modify `max_position_embeddings` and have a
279
+ # `original_max_position_embeddings` field containing the pretrained value. They use the ratio between these two
280
+ # values to compute the default attention scaling factor, instead of using `factor`.
281
+ if hasattr(config, "original_max_position_embeddings"):
282
+ max_position_embeddings = config.original_max_position_embeddings
283
+ expanded_max_position_embeddings = config.max_position_embeddings
284
+ factor = expanded_max_position_embeddings / max_position_embeddings
285
+ else:
286
+ max_position_embeddings = config.max_position_embeddings
287
+ expanded_max_position_embeddings = max_position_embeddings * factor
288
+
289
+ # Sets the attention factor as suggested in the paper
290
+ if attention_factor is None:
291
+ if factor <= 1.0:
292
+ attention_factor = 1.0
293
+ else:
294
+ attention_factor = math.sqrt(1 + math.log(factor) / math.log(max_position_embeddings))
295
+
296
+ # Compute the inverse frequencies -- scaled based on the target sequence length
297
+ if expanded_max_position_embeddings > max_position_embeddings:
298
+ ext_factors = torch.tensor(long_factor, dtype=torch.float32, device=device)
299
+ else:
300
+ ext_factors = torch.tensor(short_factor, dtype=torch.float32, device=device)
301
+ inv_freq_shape = torch.arange(0, dim, 2, dtype=torch.int64, device=device).float() / dim
302
+ inv_freq = 1.0 / (ext_factors * base**inv_freq_shape)
303
+
304
+ return inv_freq, attention_factor
305
+
306
+
307
+ def _compute_llama3_parameters(
308
+ config: PretrainedConfig, device: "torch.device", seq_len: Optional[int] = None, **rope_kwargs
309
+ ) -> Tuple["torch.Tensor", float]:
310
+ """
311
+ Computes the inverse frequencies for llama 3.1.
312
+
313
+ Args:
314
+ config ([`~transformers.PretrainedConfig`]):
315
+ The model configuration.
316
+ device (`torch.device`):
317
+ The device to use for initialization of the inverse frequencies.
318
+ seq_len (`int`, *optional*):
319
+ The current sequence length. Unused for this type of RoPE.
320
+ rope_kwargs (`Dict`, *optional*):
321
+ BC compatibility with the previous RoPE class instantiation, will be removed in v4.45.
322
+ Returns:
323
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
324
+ post-processing scaling factor applied to the computed cos/sin.
325
+ """
326
+ # Gets the default RoPE parameters
327
+ inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len, **rope_kwargs)
328
+
329
+ factor = config.rope_scaling["factor"] # `8` in the original implementation
330
+ low_freq_factor = config.rope_scaling["low_freq_factor"] # `1` in the original implementation
331
+ high_freq_factor = config.rope_scaling["high_freq_factor"] # `4` in the original implementation
332
+ old_context_len = config.rope_scaling["original_max_position_embeddings"] # `8192` in the original implementation
333
+
334
+ low_freq_wavelen = old_context_len / low_freq_factor
335
+ high_freq_wavelen = old_context_len / high_freq_factor
336
+
337
+ wavelen = 2 * math.pi / inv_freq
338
+ # wavelen < high_freq_wavelen: do nothing
339
+ # wavelen > low_freq_wavelen: divide by factor
340
+ inv_freq_llama = torch.where(wavelen > low_freq_wavelen, inv_freq / factor, inv_freq)
341
+ # otherwise: interpolate between the two, using a smooth factor
342
+ smooth_factor = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
343
+ smoothed_inv_freq = (1 - smooth_factor) * inv_freq_llama / factor + smooth_factor * inv_freq_llama
344
+ is_medium_freq = ~(wavelen < high_freq_wavelen) * ~(wavelen > low_freq_wavelen)
345
+ inv_freq_llama = torch.where(is_medium_freq, smoothed_inv_freq, inv_freq_llama)
346
+
347
+ return inv_freq_llama, attention_factor
348
+
349
+
350
+ # This maps the "rope_type" string field in rope config to the corresponding function to compute the RoPE parameters
351
+ # from the model config. You can append new {'rope_type': callable} pairs to this dictionary to enable custom RoPE
352
+ # parameterizations, as long as the callable has the same signature.
353
+ ROPE_INIT_FUNCTIONS = {
354
+ "default": _compute_default_rope_parameters,
355
+ "linear": _compute_linear_scaling_rope_parameters,
356
+ "dynamic": _compute_dynamic_ntk_parameters,
357
+ "yarn": _compute_yarn_parameters,
358
+ "longrope": _compute_longrope_parameters,
359
+ "llama3": _compute_llama3_parameters,
360
+ }
361
+
362
+
363
+ def _check_received_keys(rope_type: str, received_keys: set, required_keys: set, optional_keys: Optional[set] = None):
364
+ """Compare the received keys in `config.rope_scaling` against the expected and optional keys"""
365
+ # BC: "rope_type" was originally "type" -- let's gracefully handle it
366
+ if "rope_type" not in received_keys and "type" in received_keys:
367
+ received_keys -= {"type"}
368
+ received_keys.add("rope_type")
369
+
370
+ missing_keys = required_keys - received_keys
371
+ if missing_keys:
372
+ raise KeyError(f"Missing required keys in `rope_scaling` for 'rope_type'='{rope_type}': {missing_keys}")
373
+
374
+ if optional_keys is not None:
375
+ unused_keys = received_keys - required_keys - optional_keys
376
+ else:
377
+ unused_keys = received_keys - required_keys
378
+ if unused_keys:
379
+ logger.warning(f"Unrecognized keys in `rope_scaling` for 'rope_type'='{rope_type}': {unused_keys}")
380
+
381
+
382
+ def _validate_default_rope_parameters(config: PretrainedConfig):
383
+ rope_scaling = config.rope_scaling
384
+ rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type"
385
+ required_keys = {"rope_type"}
386
+ received_keys = set(rope_scaling.keys())
387
+ _check_received_keys(rope_type, received_keys, required_keys)
388
+
389
+
390
+ def _validate_linear_scaling_rope_parameters(config: PretrainedConfig):
391
+ rope_scaling = config.rope_scaling
392
+ rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type"
393
+ required_keys = {"rope_type", "factor"}
394
+ received_keys = set(rope_scaling.keys())
395
+ _check_received_keys(rope_type, received_keys, required_keys)
396
+
397
+ factor = rope_scaling["factor"]
398
+ if factor is None or not isinstance(factor, float) or factor < 1.0:
399
+ logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}")
400
+
401
+
402
+ def _validate_dynamic_scaling_rope_parameters(config: PretrainedConfig):
403
+ rope_scaling = config.rope_scaling
404
+ rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type"
405
+ required_keys = {"rope_type", "factor"}
406
+ # TODO (joao): update logic for the inclusion of `original_max_position_embeddings`
407
+ optional_keys = {"original_max_position_embeddings"}
408
+ received_keys = set(rope_scaling.keys())
409
+ _check_received_keys(rope_type, received_keys, required_keys, optional_keys)
410
+
411
+ factor = rope_scaling["factor"]
412
+ if factor is None or not isinstance(factor, float) or factor < 1.0:
413
+ logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}")
414
+
415
+
416
+ def _validate_yarn_parameters(config: PretrainedConfig):
417
+ rope_scaling = config.rope_scaling
418
+ rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type"
419
+ required_keys = {"rope_type", "factor"}
420
+ optional_keys = {"attention_factor", "beta_fast", "beta_slow"}
421
+ received_keys = set(rope_scaling.keys())
422
+ _check_received_keys(rope_type, received_keys, required_keys, optional_keys)
423
+
424
+ factor = rope_scaling["factor"]
425
+ if factor is None or not isinstance(factor, float) or factor < 1.0:
426
+ logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}")
427
+
428
+ attention_factor = rope_scaling.get("attention_factor")
429
+ if attention_factor is not None and (not isinstance(attention_factor, float) or attention_factor < 0):
430
+ logger.warning(
431
+ f"`rope_scaling`'s attention_factor field must be a float greater than 0, got {attention_factor}"
432
+ )
433
+ beta_fast = rope_scaling.get("beta_fast")
434
+ if beta_fast is not None and not isinstance(beta_fast, float):
435
+ logger.warning(f"`rope_scaling`'s beta_fast field must be a float, got {beta_fast}")
436
+ beta_slow = rope_scaling.get("beta_slow")
437
+ if beta_slow is not None and not isinstance(beta_slow, float):
438
+ logger.warning(f"`rope_scaling`'s beta_slow field must be a float, got {beta_slow}")
439
+
440
+ if (beta_fast or 32) < (beta_slow or 1):
441
+ logger.warning(
442
+ f"`rope_scaling`'s beta_fast field must be greater than beta_slow, got beta_fast={beta_fast} "
443
+ f"(defaults to 32 if None) and beta_slow={beta_slow} (defaults to 1 if None)"
444
+ )
445
+
446
+
447
+ def _validate_longrope_parameters(config: PretrainedConfig):
448
+ rope_scaling = config.rope_scaling
449
+ rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type"
450
+ required_keys = {"rope_type", "short_factor", "long_factor"}
451
+ # TODO (joao): update logic for the inclusion of `original_max_position_embeddings`
452
+ optional_keys = {"attention_factor", "factor", "original_max_position_embeddings"}
453
+ received_keys = set(rope_scaling.keys())
454
+ _check_received_keys(rope_type, received_keys, required_keys, optional_keys)
455
+
456
+ partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0
457
+ head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
458
+ dim = int(head_dim * partial_rotary_factor)
459
+
460
+ short_factor = rope_scaling.get("short_factor")
461
+ if not (isinstance(short_factor, list) and all(isinstance(x, (int, float)) for x in short_factor)):
462
+ logger.warning(f"`rope_scaling`'s short_factor field must be a list of numbers, got {short_factor}")
463
+ if not len(short_factor) == dim // 2:
464
+ logger.warning(f"`rope_scaling`'s short_factor field must have length {dim // 2}, got {len(short_factor)}")
465
+
466
+ long_factor = rope_scaling.get("long_factor")
467
+ if not (isinstance(long_factor, list) and all(isinstance(x, (int, float)) for x in long_factor)):
468
+ logger.warning(f"`rope_scaling`'s long_factor field must be a list of numbers, got {long_factor}")
469
+ if not len(long_factor) == dim // 2:
470
+ logger.warning(f"`rope_scaling`'s long_factor field must have length {dim // 2}, got {len(long_factor)}")
471
+
472
+ # Handle Phi3 divergence: prefer the use of `attention_factor` and/or `factor` over
473
+ # `original_max_position_embeddings` to compute internal variables. The latter lives outside `rope_scaling` and is
474
+ # unique to longrope (= undesirable)
475
+ if hasattr(config, "original_max_position_embeddings"):
476
+ logger.warning_once(
477
+ "This model has set an `original_max_position_embeddings` field, to be used together with "
478
+ "`max_position_embeddings` to determine a scaling factor. Please set the `factor` field of `rope_scaling` "
479
+ "with this ratio instead -- we recommend the use of this field over `original_max_position_embeddings`, "
480
+ "as it is compatible with most model architectures."
481
+ )
482
+ else:
483
+ factor = rope_scaling.get("factor")
484
+ if factor is None:
485
+ logger.warning("Missing required keys in `rope_scaling`: 'factor'")
486
+ elif not isinstance(factor, float) or factor < 1.0:
487
+ logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}")
488
+
489
+ attention_factor = rope_scaling.get("attention_factor")
490
+ if attention_factor is not None and (not isinstance(attention_factor, float) or attention_factor < 0):
491
+ logger.warning(
492
+ f"`rope_scaling`'s attention_factor field must be a float greater than 0, got {attention_factor}"
493
+ )
494
+
495
+
496
+ def _validate_llama3_parameters(config: PretrainedConfig):
497
+ rope_scaling = config.rope_scaling
498
+ rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type"
499
+ required_keys = {"rope_type", "factor", "original_max_position_embeddings", "low_freq_factor", "high_freq_factor"}
500
+ received_keys = set(rope_scaling.keys())
501
+ _check_received_keys(rope_type, received_keys, required_keys)
502
+
503
+ factor = rope_scaling["factor"]
504
+ if factor is None or not isinstance(factor, float) or factor < 1.0:
505
+ logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}")
506
+
507
+ low_freq_factor = rope_scaling["low_freq_factor"]
508
+ high_freq_factor = rope_scaling["high_freq_factor"]
509
+ if low_freq_factor is None or not isinstance(low_freq_factor, float):
510
+ logger.warning(f"`rope_scaling`'s low_freq_factor field must be a float, got {low_freq_factor}")
511
+ if high_freq_factor is None or not isinstance(high_freq_factor, float):
512
+ logger.warning(f"`rope_scaling`'s high_freq_factor field must be a float, got {high_freq_factor}")
513
+ if high_freq_factor <= low_freq_factor:
514
+ logger.warning(
515
+ "`rope_scaling`'s high_freq_factor field must be greater than low_freq_factor, got high_freq_factor="
516
+ f"{high_freq_factor} and low_freq_factor={low_freq_factor}"
517
+ )
518
+
519
+ original_max_position_embeddings = rope_scaling["original_max_position_embeddings"]
520
+ if original_max_position_embeddings is None or not isinstance(original_max_position_embeddings, int):
521
+ logger.warning(
522
+ "`rope_scaling`'s original_max_position_embeddings field must be an integer, got "
523
+ f"{original_max_position_embeddings}"
524
+ )
525
+ if original_max_position_embeddings >= config.max_position_embeddings:
526
+ logger.warning(
527
+ "`rope_scaling`'s original_max_position_embeddings field must be less than max_position_embeddings, got "
528
+ f"{original_max_position_embeddings} and max_position_embeddings={config.max_position_embeddings}"
529
+ )
530
+
531
+
532
+ # Like `ROPE_INIT_FUNCTIONS`, this validation function mapping can be dynamically updated for custom RoPE types.
533
+ ROPE_VALIDATION_FUNCTIONS = {
534
+ "default": _validate_default_rope_parameters,
535
+ "linear": _validate_linear_scaling_rope_parameters,
536
+ "dynamic": _validate_dynamic_scaling_rope_parameters,
537
+ "yarn": _validate_yarn_parameters,
538
+ "longrope": _validate_longrope_parameters,
539
+ "llama3": _validate_llama3_parameters,
540
+ }
541
+
542
+
543
+ def rope_config_validation(config: PretrainedConfig):
544
+ """
545
+ Validate the RoPE config arguments, given a `PretrainedConfig` object
546
+ """
547
+ rope_scaling = getattr(config, "rope_scaling", None) # not a default parameter in `PretrainedConfig`
548
+ if rope_scaling is None:
549
+ return
550
+
551
+ # BC: "rope_type" was originally "type"
552
+ rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", "default"))
553
+ validation_fn = ROPE_VALIDATION_FUNCTIONS.get(rope_type)
554
+ if validation_fn is not None:
555
+ validation_fn(config)
556
+ else:
557
+ logger.warning(
558
+ f"Missing validation function mapping in `ROPE_VALIDATION_FUNCTIONS` for 'rope_type'='{rope_type}'"
559
+ )
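
The RoPE helpers above are driven entirely by the `rope_scaling` dict in the model config: `rope_config_validation` checks that dict, and `ROPE_INIT_FUNCTIONS` maps its `rope_type` to the matching parameter computation. A minimal sketch of that flow for a llama3-style config is shown below; the config values are placeholders and the import assumes this vendored file sits on the Python path.

```python
import torch
from transformers import PretrainedConfig

# Assumption: the vendored module is importable under its file name.
from transformers_4_44_2__modeling_rope_utils import ROPE_INIT_FUNCTIONS, rope_config_validation

config = PretrainedConfig(
    hidden_size=4096,
    num_attention_heads=32,
    max_position_embeddings=131072,
    rope_theta=500000.0,
    rope_scaling={
        "rope_type": "llama3",
        "factor": 8.0,
        "low_freq_factor": 1.0,
        "high_freq_factor": 4.0,
        "original_max_position_embeddings": 8192,
    },
)

rope_config_validation(config)  # logs a warning if the rope_scaling dict is malformed
rope_init_fn = ROPE_INIT_FUNCTIONS[config.rope_scaling["rope_type"]]
inv_freq, attention_factor = rope_init_fn(config, device=torch.device("cpu"))
print(inv_freq.shape, attention_factor)  # torch.Size([64]), 1.0 for llama3
```

The same two entry points accept custom types: appending a `{"my_rope_type": callable}` pair to both dictionaries is enough to plug in a new parameterization, as long as the callable keeps the `(config, device, seq_len, **rope_kwargs)` signature.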
transformers_4_44_2__pytorch_utils.py ADDED
@@ -0,0 +1,17 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from torch import nn
16
+
17
+ ALL_LAYERNORM_LAYERS = [nn.LayerNorm]
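
This shim exposes only `ALL_LAYERNORM_LAYERS`, the list that Hugging Face Trainer-style code conventionally uses to find normalization layers whose parameters should be excluded from weight decay. A small sketch of that pattern, with a helper name of our own (not part of the upload):

```python
from torch import nn

# Assumption: the vendored module is importable under its file name.
from transformers_4_44_2__pytorch_utils import ALL_LAYERNORM_LAYERS


def no_decay_param_names(model: nn.Module) -> set[str]:
    """Names of parameters that typically skip weight decay: LayerNorm weights and all biases."""
    norm_names = set()
    for module_name, module in model.named_modules():
        if isinstance(module, tuple(ALL_LAYERNORM_LAYERS)):
            norm_names |= {f"{module_name}.{p}" for p, _ in module.named_parameters()}
    return norm_names | {n for n, _ in model.named_parameters() if n.endswith("bias")}
```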
variable_cache.py ADDED
@@ -0,0 +1,139 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Nvidia Corporation. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from copy import deepcopy
17
+ from typing import Optional, Dict, Any, Tuple
18
+
19
+ import torch
20
+ from transformers.cache_utils import Cache # used to let GenerationMixin know that we use a Cache object
21
+
22
+ from .configuration_decilm import DeciLMConfig
23
+ from .transformers_4_44_2__cache_utils import Cache as Cache_4_44_2, SinkCache, StaticCache, SlidingWindowCache
24
+
25
+
26
+ class VariableCache(Cache_4_44_2, Cache):
27
+ """
28
+ A Cache object that supports a different Cache implementation for every layer,
29
+ including layers without any kv-cache.
30
+ Implemented using a list of Cache objects, each of which represents a "model" with a single layer.
31
+ The default implementation for the layer caches is StaticCache.
32
+ The cache of each layer is allocated on the same GPU as the layer itself.
33
+ """
34
+
35
+ def __init__(
36
+ self,
37
+ *, # keyword-only: no positional args allowed, to avoid mix-ups with newer transformers versions
38
+ config: DeciLMConfig,
39
+ batch_size: Optional[int] = None,
40
+ max_cache_len: int = None,
41
+ dtype: torch.dtype = torch.float32,
42
+ max_batch_size: Optional[int] = None,
43
+ **kwargs,
44
+ ) -> None:
45
+ Cache_4_44_2.__init__(self)
46
+
47
+ self.config = deepcopy(config)
48
+ self.max_batch_size = batch_size or max_batch_size
49
+ self.batch_size = self.max_batch_size
50
+ self.max_cache_len = config.max_position_embeddings if max_cache_len is None else max_cache_len
51
+ self.dtype = dtype
52
+
53
+ self.layer_caches: list[Cache_4_44_2 | None] = [None] * config.num_hidden_layers
54
+ self.layer_devices: list[torch.device | None] = [None] * config.num_hidden_layers
55
+
56
+ def update(
57
+ self,
58
+ key_states: torch.Tensor,
59
+ value_states: torch.Tensor,
60
+ layer_idx: int,
61
+ cache_kwargs: Optional[Dict[str, Any]] = None,
62
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
63
+ if self.layer_caches[layer_idx] is None:
64
+ self.layer_devices[layer_idx] = key_states.device
65
+ self._init_layer_cache(layer_idx)
66
+
67
+ layer_cache = self.layer_caches[layer_idx]
68
+ assert layer_cache is not None, f"Trying to update the cache of a cache-less layer: {layer_idx=}"
69
+
70
+ k_out, v_out = layer_cache.update(key_states=key_states,
71
+ value_states=value_states,
72
+ layer_idx=0,
73
+ cache_kwargs=cache_kwargs)
74
+ seq_len = self.get_seq_length(layer_idx)
75
+ k_out = k_out[:, :, :seq_len, :]
76
+ v_out = v_out[:, :, :seq_len, :]
77
+ return k_out, v_out
78
+
79
+ def _init_layer_cache(self, layer_idx: int) -> None:
80
+ block_config = self.config.block_configs[layer_idx]
81
+ attention_config = block_config.attention
82
+
83
+ if attention_config.no_op or attention_config.replace_with_linear:
84
+ return None
85
+
86
+ device = self.layer_devices[layer_idx]
87
+ assert device is not None, f"Trying to init layer cache for {layer_idx=} without device"
88
+
89
+ config = deepcopy(self.config)
90
+ config.num_hidden_layers = 1
91
+ config.num_key_value_heads = self.config.num_attention_heads // attention_config.n_heads_in_group
92
+
93
+ if attention_config.window_length is not None:
94
+ if not attention_config.is_sink:
95
+ config.sliding_window = attention_config.window_length
96
+ self.layer_caches[layer_idx] = SlidingWindowCache(config=config,
97
+ max_batch_size=self.max_batch_size,
98
+ max_cache_len=self.max_cache_len,
99
+ device=device,
100
+ dtype=self.dtype)
101
+ return
102
+ elif not attention_config.unshifted_sink:
103
+ self.layer_caches[layer_idx] = SinkCache(window_length=attention_config.window_length,
104
+ num_sink_tokens=attention_config.num_sink_tokens)
105
+ return
106
+
107
+ self.layer_caches[layer_idx] = StaticCache(config=config,
108
+ max_batch_size=self.max_batch_size,
109
+ max_cache_len=self.max_cache_len,
110
+ device=device,
111
+ dtype=self.dtype)
112
+
113
+ def _get_first_real_cache(self) -> Cache:
114
+ for layer_cache in self.layer_caches:
115
+ if layer_cache is not None:
116
+ return layer_cache
117
+ raise ValueError("No real cache found, all layer caches are None.")
118
+
119
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
120
+ if layer_idx == 0 and self.layer_caches[0] is None:
121
+ try:
122
+ layer_cache = self._get_first_real_cache()
123
+ except ValueError:
124
+ return 0
125
+ else:
126
+ layer_cache = self.layer_caches[layer_idx]
127
+ return layer_cache.get_seq_length()
128
+
129
+ def get_max_length(self) -> Optional[int]:
130
+ """Returns the maximum sequence length of the cached states."""
131
+ return self.max_cache_len
132
+
133
+ def reset(self):
134
+ for layer_idx in range(len(self.layer_caches)):
135
+ layer_cache = self.layer_caches[layer_idx]
136
+ if hasattr(layer_cache, "reset"):
137
+ layer_cache.reset()
138
+ else:
139
+ self._init_layer_cache(layer_idx)
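
`VariableCache` is what lets the DeciLM remote-code model mix cache types per layer: static by default, sliding-window or sink caches when a window is configured, and no cache at all for no-op or linear-replacement attention blocks (those layers simply return early from `_init_layer_cache`). A rough end-to-end sketch follows; the repo path and sizes are placeholders, and it assumes the dynamically loaded modeling module imports `VariableCache` and that `generate` accepts a preallocated cache via `past_key_values`.

```python
import importlib

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "path/or/hub-id/of/this/model"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id, trust_remote_code=True, torch_dtype=torch.bfloat16, device_map="auto"
)

# Assumption: VariableCache is reachable from the same dynamically loaded module as the model class.
modeling_module = importlib.import_module(type(model).__module__)
cache = modeling_module.VariableCache(
    config=model.config, max_batch_size=1, max_cache_len=4096, dtype=torch.bfloat16
)

inputs = tokenizer("The capital of France is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=32, past_key_values=cache)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

Because each per-layer cache is allocated lazily on the first `update` call, its tensors land on whichever device that layer's key/value states live on, which is what lets the class cooperate with `device_map="auto"`.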