gx123 committed
Commit 9827131 · verified · 1 Parent(s): cc991f6

Create main.py

Files changed (1)
  main.py +31 -0
main.py ADDED
@@ -0,0 +1,31 @@
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ device = "cuda"  # device for the tokenized inputs; the model itself is placed via device_map="auto"
+
+ model = AutoModelForCausalLM.from_pretrained(
+     "Qwen/Qwen2-72B-Instruct",
+     torch_dtype="auto",
+     device_map="auto"
+ )
+ tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-72B-Instruct")
+
+ prompt = "Give me a short introduction to large language models."
+ messages = [
+     {"role": "system", "content": "You are a helpful assistant."},
+     {"role": "user", "content": prompt}
+ ]
+ text = tokenizer.apply_chat_template(
+     messages,
+     tokenize=False,
+     add_generation_prompt=True
+ )
+ model_inputs = tokenizer([text], return_tensors="pt").to(device)
+
+ generated_ids = model.generate(
+     model_inputs.input_ids,
+     max_new_tokens=512
+ )
+ generated_ids = [  # keep only the newly generated tokens, dropping the echoed prompt
+     output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+ ]
+
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
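
For reference, apply_chat_template(..., tokenize=False, add_generation_prompt=True) turns the messages list into a single prompt string before tokenization. Assuming the stock Qwen2 chat template (ChatML-style, not part of this commit), the rendered text looks roughly like:

<|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
Give me a short introduction to large language models.<|im_end|>
<|im_start|>assistant

The list-comprehension step after model.generate slices off the prompt tokens, since generate returns the prompt followed by the completion for each sequence in the batch. Note that the script only assigns response; to see the output when running main.py directly, one would typically end with a final print(response).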