Update README.md
README.md CHANGED
@@ -59,14 +59,11 @@ import torch
 from unsloth import FastModel
 from transformers import TextStreamer
 
-
 
 # 1. Model and Tokenizer Loading
 max_seq_length = 1024
 model_name = "NuclearAi/Nuke_X_Gemma3_1B_Reasoner_Testing"
 
-
-
 print(f"Loading model: {model_name}...")
 
 model, tokenizer = FastModel.from_pretrained(
@@ -78,7 +75,6 @@ model, tokenizer = FastModel.from_pretrained(
 )
 print("Model loaded.")
 
-
 
 # 2. Define Prompt Structure
 reasoning_start = "<think>"
@@ -86,7 +82,6 @@ reasoning_end = "</think>"
 solution_start = "<response>"
 solution_end = "</response>"
 
-
 
 system_prompt = \
 f"""You are given a problem.
@@ -99,7 +94,6 @@ Then, provide your solution between {solution_start}{solution_end}"""
 user_question = "Write a short story about a cat who learns to fly." # Try another question
 
 
-
 # 4. Format Input for Chat Model
 messages = [
     {"role": "system", "content": system_prompt},
@@ -113,7 +107,6 @@ text_input = tokenizer.apply_chat_template(
 )
 
 
-
 # 5. Tokenize and Prepare for Generation
 device = model.device if hasattr(model, 'device') else ('cuda' if torch.cuda.is_available() else 'cpu')
 inputs = tokenizer([text_input], return_tensors="pt").to(device)
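The diff shows only fragments of the README's script: the `FastModel.from_pretrained(...)` arguments, the middle of the system prompt, the `apply_chat_template(...)` arguments, and everything after tokenization fall between or after the hunks. For reference, here is a minimal end-to-end sketch of the same flow with those gaps filled in. The `load_in_4bit` and `add_generation_prompt` flags, the `max_new_tokens` value, the middle lines of the system prompt, and the final generation step are assumptions based on typical Unsloth/Transformers usage, not the README's exact content.

```python
# Minimal sketch of the full flow. Lines marked "assumed" fill gaps
# the diff elides; they reflect typical Unsloth/Transformers usage,
# not the README's exact values.
import torch
from unsloth import FastModel
from transformers import TextStreamer

# 1. Model and Tokenizer Loading
max_seq_length = 1024
model_name = "NuclearAi/Nuke_X_Gemma3_1B_Reasoner_Testing"

print(f"Loading model: {model_name}...")
model, tokenizer = FastModel.from_pretrained(
    model_name=model_name,
    max_seq_length=max_seq_length,
    load_in_4bit=True,  # assumed; the diff elides the actual kwargs
)
print("Model loaded.")

# 2. Define Prompt Structure
reasoning_start, reasoning_end = "<think>", "</think>"
solution_start, solution_end = "<response>", "</response>"

system_prompt = \
f"""You are given a problem.
Think about the problem and provide your reasoning.
Place it between {reasoning_start} and {reasoning_end}.
Then, provide your solution between {solution_start}{solution_end}"""
# (the two middle lines of the prompt are an assumed reconstruction
# of text the diff elides)

# 3. Define the User's Question (step number inferred; the diff
# jumps from step 2 to step 4)
user_question = "Write a short story about a cat who learns to fly."

# 4. Format Input for Chat Model
messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": user_question},
]
text_input = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,  # assumed; elided in the diff
)

# 5. Tokenize and Prepare for Generation
device = model.device if hasattr(model, "device") else ("cuda" if torch.cuda.is_available() else "cpu")
inputs = tokenizer([text_input], return_tensors="pt").to(device)

# 6. Generate (assumed step: TextStreamer is imported at the top of
# the snippet, so streaming generation is presumably what follows)
streamer = TextStreamer(tokenizer, skip_prompt=True)
output_ids = model.generate(**inputs, max_new_tokens=512, streamer=streamer)
```

Streaming via `TextStreamer` prints tokens to stdout as they are generated, which is presumably why the README imports it alongside `FastModel`.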
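Because the system prompt asks the model to wrap its reasoning in `<think>...</think>` and its answer in `<response>...</response>`, the decoded completion can be split on those tags afterwards. The README stops before this point; the snippet below is purely illustrative and assumes the `output_ids` tensor from the sketch above.

```python
import re

# Decode the completion (skip_special_tokens drops e.g. <eos>).
completion = tokenizer.decode(output_ids[0], skip_special_tokens=True)

# Pull out the tagged sections; fall back gracefully if a tag is missing.
think = re.search(r"<think>(.*?)</think>", completion, re.DOTALL)
answer = re.search(r"<response>(.*?)</response>", completion, re.DOTALL)

reasoning_text = think.group(1).strip() if think else ""
answer_text = answer.group(1).strip() if answer else completion.strip()
print(answer_text)
```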