INFERENCE
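
The script below loads Mr-Vicky-01/QnA-router as a sequence classifier, optionally joins prior conversation turns to a new question with the [SEP] token, and returns the predicted label of the model the question should be routed to.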

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the tokenizer and routing classifier, then move the model to GPU if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained("Mr-Vicky-01/QnA-router")
model = AutoModelForSequenceClassification.from_pretrained("Mr-Vicky-01/QnA-router")
model.to(device)
model.eval()

def preprocess_input(pre_conversation, question):
    # Join any prior conversation turns and the new question with the [SEP] token;
    # with no prior conversation, the question is used on its own.
    if pre_conversation:
        input_text = pre_conversation + "[SEP]" + question
    else:
        input_text = question
    return input_text

def predict(pre_conversation, question):
    input_text = preprocess_input(pre_conversation, question)
    print(f"Processed input: {input_text}")

    # Tokenize and move the input tensors to the same device as the model
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
    inputs = {key: value.to(device) for key, value in inputs.items()}

    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted_class_id = torch.argmax(logits, dim=1).item()

    # Map the predicted class index to its label (the route/model name)
    predicted_label = model.config.id2label[predicted_class_id]
    return predicted_label


# Example 1: Single question only
single_question = "make a python code"
print("\nPredicting for single question...")
result = predict(pre_conversation="", question=single_question)
print(f"Predicted model: {result}")

# Example 2: Pre-conversation + new question
pre_conversation = "hi[SEP]Hello! How can I help you today?[SEP]how are you[SEP]I'm doing great, thanks for asking! What about you?"
new_question = "what is AI"
print("\nPredicting for conversation + new question...")
result = predict(pre_conversation=pre_conversation, question=new_question)
print(f"Predicted model: {result}")