Update README.md
BFS-Prover achieves state-of-the-art performance on the MiniF2F test benchmark.

- Combined with a simpler search method (BFS) rather than MCTS

## Usage

The model expects tactic states in the format `"{state}:::"`, where `:::` serves as a special indicator to signal the model to generate a tactic for the given state. The model will echo back the input state followed by the generated tactic.

```python
# Example code for loading and using the tactic generator model
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("bytedance-research/BFS-Prover")
tokenizer = AutoTokenizer.from_pretrained("bytedance-research/BFS-Prover")
state = "h : x = y + 2 ⊢ x - 1 = y + 1"
sep = ":::"
prompt = state + sep  # Creates "h : x = y + 2 ⊢ x - 1 = y + 1:::"
```
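
To obtain a tactic from this prompt, one possible continuation uses the standard Hugging Face `generate` API. This is a minimal sketch: the decoding settings (`max_new_tokens=64`, greedy decoding) are illustrative assumptions, not values documented for this model.

```python
# Minimal sketch (assumed decoding settings): tokenize the prompt,
# generate, and strip the echoed state to recover the tactic.
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
text = tokenizer.decode(outputs[0], skip_special_tokens=True)

# The model echoes the input state followed by the tactic, so the
# generated tactic is everything after the ":::" separator.
tactic = text.split(sep, 1)[1].strip()
print(tactic)
```

In a full proving loop, each candidate tactic would then be checked in the proof assistant, with BFS expanding the resulting proof states, per the search method noted above.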