Update README.md
README.md (CHANGED)

````diff
@@ -60,17 +60,22 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 model = AutoModelForCausalLM.from_pretrained("bytedance-research/BFS-Prover")
 tokenizer = AutoTokenizer.from_pretrained("bytedance-research/BFS-Prover")
 
-# Input format
+# Input format: the model expects tactic states in the format f"{state}:::"
+# The model will echo back the input state followed by the generated tactic
+
 state = "h : x = y + 2 ⊢ x - 1 = y + 1"
 sep = ":::"
-prompt = state + sep
+prompt = state + sep  # Creates "h : x = y + 2 ⊢ x - 1 = y + 1:::"
+
 inputs = tokenizer(prompt, return_tensors="pt")
 outputs = model.generate(**inputs)
 tactic = tokenizer.decode(outputs[0], skip_special_tokens=True).split(sep)[1]
+print(tactic)
 
-#
-# Input state:
-#
+# Complete example:
+# Input state: "h : x = y + 2 ⊢ x - 1 = y + 1"
+# Full prompt: "h : x = y + 2 ⊢ x - 1 = y + 1:::"
+# Model output: "h : x = y + 2 ⊢ x - 1 = y + 1:::simp [h]"
 # Final tactic: "simp [h]"
 ```
 
````
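For quick testing, the updated snippet assembles into a single runnable script. This is a minimal sketch that keeps the repository id, `:::` separator, and example state from the diff above; the `max_new_tokens` cap is an illustrative assumption, since the README does not pin generation settings.

```python
# Minimal end-to-end sketch of the updated README snippet (assumptions noted inline).
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("bytedance-research/BFS-Prover")
tokenizer = AutoTokenizer.from_pretrained("bytedance-research/BFS-Prover")

# Prompt format from the README: the Lean tactic state followed by the ":::" separator.
state = "h : x = y + 2 ⊢ x - 1 = y + 1"
sep = ":::"
prompt = state + sep

inputs = tokenizer(prompt, return_tensors="pt")
# max_new_tokens is an assumed cap, not a value from the README.
outputs = model.generate(**inputs, max_new_tokens=64)

# The model echoes the prompt, so the tactic is the text after the separator.
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
tactic = decoded.split(sep)[1]
print(tactic)  # for this state the README shows "simp [h]"
```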