miguelcarv committed · verified
Commit 33b739d · 1 Parent(s): b5ae57f

Update README.md

Files changed (1):
  1. README.md (+4 -4)
README.md CHANGED
@@ -21,15 +21,15 @@ import transformers
 model = transformers.AutoModelForCausalLM.from_pretrained(
     "miguelcarv/phi-2-slimorca",
     trust_remote_code=True,
-    torch_dtype = torch.bfloat16
+    torch_dtype=torch.bfloat16
 ).to('cuda')
 tokenizer = transformers.AutoTokenizer.from_pretrained("microsoft/phi-2")
 
 
-SYSTEM_PROMPT = "You are an AI assistant. You will be given a task. You must generate a detailed and long answer."
+SYSTEM_PROMPT = "You are an AI assistant. You will be given a task. You must generate a short and concise answer."
 input_text = f"""{SYSTEM_PROMPT}
 
-Instruction: Give me the first 5 prime numbers and explain what prime numbers are.
+Instruction: Give me the first 5 prime numbers and explain what prime numbers are.
 Output:"""
 
 with torch.no_grad():
@@ -39,7 +39,7 @@ with torch.no_grad():
         num_beams = 3,
         eos_token_id = tokenizer.eos_token_id
     )
-print(tokenizer.decode(outputs[0], skip_special_tokens=False))
+print(tokenizer.decode(outputs[0], skip_special_tokens=True))
 ```
 
 ## Training Details
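For reference, here is the updated snippet from this commit assembled into a self-contained script. The hunks above do not show the `import torch` line, how `input_text` is tokenized, or the generation length limit, so the `inputs = tokenizer(...)` call and `max_new_tokens=256` below are illustrative assumptions rather than the README's exact code:

```python
import torch
import transformers

# Load the fine-tuned model in bfloat16 (as set by this commit) and the base phi-2 tokenizer.
model = transformers.AutoModelForCausalLM.from_pretrained(
    "miguelcarv/phi-2-slimorca",
    trust_remote_code=True,
    torch_dtype=torch.bfloat16
).to('cuda')
tokenizer = transformers.AutoTokenizer.from_pretrained("microsoft/phi-2")

# System prompt as updated in this commit (short and concise answers).
SYSTEM_PROMPT = "You are an AI assistant. You will be given a task. You must generate a short and concise answer."
input_text = f"""{SYSTEM_PROMPT}

Instruction: Give me the first 5 prime numbers and explain what prime numbers are.
Output:"""

# Assumption: tokenize the prompt and move it to the GPU; the diff does not show this step.
inputs = tokenizer(input_text, return_tensors="pt").to('cuda')

with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=256,   # assumed limit, not part of the README diff
        num_beams=3,
        eos_token_id=tokenizer.eos_token_id
    )

# skip_special_tokens=True (changed in this commit) strips the end-of-text marker from the output.
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```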