Update README.md
README.md
```diff
@@ -49,7 +49,6 @@ Through format tuning, the LLM has adopted the COAT reasoning style but struggle
 import os
 from tqdm import tqdm
 import torch
-from transformers import AutoTokenizer
 from vllm import LLM, SamplingParams
 
 def generate(question_list,model_path):
@@ -68,7 +67,7 @@ def generate(question_list,model_path):
     completions = [[output.text for output in output_item.outputs] for output_item in outputs]
     return completions
 
-def prepare_prompt(question, tokenizer):
+def prepare_prompt(question):
     prompt = f"<|im_start|>user\nSolve the following math problem efficiently and clearly.\nPlease reason step by step, and put your final answer within \\boxed{{}}.\nProblem: {question}<|im_end|>\n<|im_start|>assistant\n"
     return prompt
 
@@ -77,9 +76,8 @@ def run():
     all_problems = [
         "which number is larger? 9.11 or 9.9?",
     ]
-    tokenizer = AutoTokenizer.from_pretrained(model_path)
     completions = generate(
-        [prepare_prompt(problem_data, tokenizer) for problem_data in all_problems],
+        [prepare_prompt(problem_data) for problem_data in all_problems],
         model_path
     )
 
```
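The change drops the `transformers.AutoTokenizer` dependency from the quick-start example: `prepare_prompt` assembles the ChatML-style `<|im_start|>`/`<|im_end|>` prompt string by hand, so the tokenizer was never actually used. For reference, here is a minimal sketch of the post-change script as a whole. The diff elides the body of `generate` and the definition of `model_path`, so the `LLM` construction, the `SamplingParams` values, and the model path below are assumptions based on the standard vLLM API, not code from this commit.

```python
# Imports as in the README excerpt; tqdm and torch are presumably used in lines
# elided from the diff.
import os
from tqdm import tqdm
import torch
from vllm import LLM, SamplingParams

def generate(question_list, model_path):
    # Elided in the diff: an assumed vLLM setup, consistent with the visible lines.
    llm = LLM(model=model_path)
    sampling_params = SamplingParams(temperature=0.0, max_tokens=4096, n=1)  # assumed values
    outputs = llm.generate(question_list, sampling_params)
    # Visible in the diff: collect every sampled completion for each prompt.
    completions = [[output.text for output in output_item.outputs] for output_item in outputs]
    return completions

def prepare_prompt(question):
    # Hand-built ChatML prompt; no tokenizer is needed, which is why the
    # AutoTokenizer import and the tokenizer argument could be removed.
    prompt = f"<|im_start|>user\nSolve the following math problem efficiently and clearly.\nPlease reason step by step, and put your final answer within \\boxed{{}}.\nProblem: {question}<|im_end|>\n<|im_start|>assistant\n"
    return prompt

def run():
    model_path = "path/to/model"  # placeholder; the diff does not show where model_path is set
    all_problems = [
        "which number is larger? 9.11 or 9.9?",
    ]
    completions = generate(
        [prepare_prompt(problem_data) for problem_data in all_problems],
        model_path,
    )
    print(completions[0][0])

if __name__ == "__main__":
    run()
```

Since the prompt string already follows the ChatML format, applying a tokenizer-side chat template would be redundant, which is what makes the tokenizer removable here.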