Text Generation
Transformers
Safetensors
qwen2
conversational
text-generation-inference
Inference Endpoints
chaoscodes committed on
Commit
60decc5
·
verified ·
1 Parent(s): 5c1d9f3

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -61,7 +61,7 @@ def generate(question_list,model_path):
61
  max_tokens=4096,
62
  temperature=0.0,
63
  n=1,
64
- skip_special_tokens=False
65
  )
66
  outputs = llm.generate(question_list, sampling_params, use_tqdm=True)
67
  completions = [[output.text for output in output_item.outputs] for output_item in outputs]
 
61
  max_tokens=4096,
62
  temperature=0.0,
63
  n=1,
64
+ skip_special_tokens=True # hide special tokens such as "<|continue|>", "<|reflect|>", and "<|explore|>"
65
  )
66
  outputs = llm.generate(question_list, sampling_params, use_tqdm=True)
67
  completions = [[output.text for output in output_item.outputs] for output_item in outputs]