File size: 980 Bytes
43dfcb4
 
 
a042655
 
 
 
 
8f08840
7a7fb4c
 
43dfcb4
7a7fb4c
 
43dfcb4
7a7fb4c
 
 
 
3606011
7a7fb4c
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import streamlit as st


@st.cache_resource
def _load_model():
    """Load the Koala-7B tokenizer and model exactly once.

    Streamlit reruns this entire script on every widget interaction;
    without caching, the 7B model would be reloaded on every button press.
    """
    tokenizer = AutoTokenizer.from_pretrained("samwit/koala-7b")
    model = AutoModelForCausalLM.from_pretrained("samwit/koala-7b")
    return tokenizer, model


tokenizer, model = _load_model()

st.title("Raven Text Generator")
st.write("Ask a question about ravens and get a response!")

# Input question
question = st.text_input("Ask a question")

if st.button("Generate Response"):
    if question.strip() != "":
        # Koala-style instruction prompt; the model continues after "### Response:".
        prompt = f"### Instruction: {question}\n### Response:"
        inputs = tokenizer(prompt, return_tensors="pt")
        # Inference only: no_grad avoids building an autograd graph, and the
        # explicit attention_mask silences the transformers warning about
        # ambiguous padding.
        with torch.no_grad():
            output = model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                max_new_tokens=100,
            )
        # decode() accepts the tensor directly; no .tolist() needed.
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
        # The decoded text echoes the prompt; show only the model's answer.
        _, sep, response = generated_text.partition("### Response:")
        st.markdown("## Generated Response")
        st.write(response.strip() if sep else generated_text)
    else:
        st.warning("Please enter a question.")