import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Cache the tokenizer and model so the 7B checkpoint is loaded once,
# not on every Streamlit rerun.
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("samwit/koala-7b")
    model = AutoModelForCausalLM.from_pretrained("samwit/koala-7b")
    return tokenizer, model

tokenizer, model = load_model()

st.title("Raven Text Generator")
st.write("Ask a question about ravens and get a response!")

question = st.text_input("Ask a question")

if st.button("Generate Response"):
    if question.strip() != "":
        # Wrap the question in the instruction/response format the
        # Koala model was fine-tuned on.
        prompt = f"### Instruction: {question}\n### Response:"
        inputs = tokenizer(prompt, return_tensors="pt")
        output = model.generate(inputs["input_ids"], max_new_tokens=100)
        # decode() accepts the tensor of token ids directly.
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
        st.markdown("## Generated Response")
        st.write(generated_text)
    else:
        st.warning("Please enter a question.")