Update README.md
Browse files
README.md
CHANGED
@@ -219,18 +219,23 @@ print("### Prediction")
|
|
219 |
print(tokenizer.decode(out[0][input_len:]))
|
220 |
```
|
221 |
|
222 |
-
## Fill in the middle generation
|
223 |
```python
|
224 |
-
|
225 |
-
def
|
226 |
-
|
|
|
|
|
|
|
|
|
|
|
227 |
|
228 |
-
|
229 |
-
|
230 |
-
|
231 |
-
"""
|
232 |
|
233 |
-
encoded_input = tokenizer(
|
234 |
out = model.generate(
|
235 |
**encoded_input,
|
236 |
max_new_tokens=100,
|
|
|
219 |
print(tokenizer.decode(out[0][input_len:]))
|
220 |
```
|
221 |
|
222 |
+
## Fill-in-the-middle generation with additional files as context
|
223 |
```python
|
224 |
+
example = """<filename>utils.py
|
225 |
+
def multiply(x, y):
|
226 |
+
return x * y
|
227 |
+
<filename>config.py
|
228 |
+
DEBUG = True
|
229 |
+
MAX_VALUE = 100
|
230 |
+
<filename>example.py
|
231 |
+
<fim_suffix>
|
232 |
|
233 |
+
# Test the function
|
234 |
+
result = calculate_sum(5, 10)
|
235 |
+
print(result)<fim_prefix>def calculate_sum(a, b):
|
236 |
+
<fim_middle>"""
|
237 |
|
238 |
+
encoded_input = tokenizer(example, return_tensors='pt', return_token_type_ids=False)
|
239 |
out = model.generate(
|
240 |
**encoded_input,
|
241 |
max_new_tokens=100,
|