Update README.md
Browse files
README.md
CHANGED
@@ -20,8 +20,9 @@ import transformers
|
|
20 |
|
21 |
model = transformers.AutoModelForCausalLM.from_pretrained(
|
22 |
"miguelcarv/phi-2-slimorca",
|
23 |
-
trust_remote_code=True
|
24 |
- )
|
|
|
25 |
tokenizer = transformers.AutoTokenizer.from_pretrained("microsoft/phi-2")
|
26 |
|
27 |
|
@@ -33,12 +34,12 @@ Output:"""
|
|
33 |
|
34 |
with torch.no_grad():
|
35 |
outputs = model.generate(
|
36 |
-
tokenizer(input_text, return_tensors="pt")['input_ids'],
|
37 |
max_length=1024,
|
38 |
num_beams = 3,
|
39 |
eos_token_id = tokenizer.eos_token_id
|
40 |
)
|
41 |
-
print(tokenizer.decode(outputs[0], skip_special_tokens=False))
|
42 |
```
|
43 |
|
44 |
## Training Details
|
|
|
20 |
|
21 |
model = transformers.AutoModelForCausalLM.from_pretrained(
|
22 |
"miguelcarv/phi-2-slimorca",
|
23 |
+
trust_remote_code=True,
|
24 |
+
torch_dtype = torch.bfloat16
|
25 |
+
).to('cuda')
|
26 |
tokenizer = transformers.AutoTokenizer.from_pretrained("microsoft/phi-2")
|
27 |
|
28 |
|
|
|
34 |
|
35 |
with torch.no_grad():
|
36 |
outputs = model.generate(
|
37 |
+
tokenizer(input_text, return_tensors="pt")['input_ids'].to('cuda'),
|
38 |
max_length=1024,
|
39 |
num_beams = 3,
|
40 |
eos_token_id = tokenizer.eos_token_id
|
41 |
)
|
42 |
+
print(tokenizer.decode(outputs[0], skip_special_tokens=False))
|
43 |
```
|
44 |
|
45 |
## Training Details
|