"""Run one chat turn against a LoRA-finetuned Qwen checkpoint.

Loads the tokenizer and the PEFT adapter model from a local output
directory, generates a deterministic answer to a fixed prompt, and
prints it.
"""
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Path to the fine-tuning output directory (tokenizer + adapter weights).
MODEL_DIR = "/home/Qwen/output_qwen"

tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
model = AutoPeftModelForCausalLM.from_pretrained(
    MODEL_DIR,
    device_map="auto",
    trust_remote_code=True,
).eval()

# BUG FIX: the original set `top_p = 0`, but nucleus sampling requires
# 0 < top_p <= 1 — transformers rejects 0 (TopPLogitsWarper validation).
# The intent was deterministic output; the supported way is to disable
# sampling, which makes generation greedy.
model.generation_config.do_sample = False

prompt = "青岛海边钓鱼需要特别注意什么?"
# `chat` is Qwen's custom remote-code API; history=None starts a fresh
# conversation and the returned history is unused here.
response, history = model.chat(tokenizer, prompt, history=None)
print(response)