File size: 3,078 Bytes
9c12531
c886764
 
 
 
 
 
1a802f9
c886764
1a802f9
 
b3dc3b1
6f8f9b1
333a84a
8d1ba3e
1adcbaf
 
c886764
 
 
 
 
 
 
 
 
 
 
 
 
abe37a4
c886764
abe37a4
 
c886764
 
 
 
 
 
 
 
 
 
c7f8a5d
 
c886764
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c7f8a5d
9c12531
abe37a4
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
import gradio as gr
from huggingface_hub import InferenceClient
import json

# Chat models available through the Hugging Face Inference API.
# NOTE: the list previously contained "mistralai/Mistral-7B-Instruct-v0.3"
# twice, which produced a duplicate Radio choice; the duplicate is removed.
models_list = [
    "google/gemma-3-27b-it",
    "mistralai/Mistral-7B-Instruct-v0.3",
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    "open-r1/OlympicCoder-7B",
    "google/gemma-2-27b-it",
    "Qwen/QwQ-32B",
    "Qwen/QwQ-32B-Preview",
    "google/gemma-2-9b-it",
    "Qwen/Qwen2.5-72B-Instruct",
    "NousResearch/DeepHermes-3-Llama-3-3B-Preview",
]

def add_message(role, content, messages):
    """Append one chat message and report the updated history.

    Args:
        role: Message role, e.g. "user", "assistant", or "system".
        content: Message text.
        messages: Current history as a list of {"role", "content"} dicts.

    Returns:
        Tuple of (updated history, message count, string rendering of the
        history) — matching the (state, count, textbox) outputs wired in
        the UI.
    """
    # Build a new list instead of mutating the input in place, so the
    # shared gr.State value is only replaced via the returned output.
    updated = messages + [{"role": role, "content": content}]
    return updated, len(updated), str(updated)

def clear_messages(messages):
    """Reset the conversation.

    The incoming history is ignored; the outputs reset the state, the
    message counter, and the textbox rendering to their empty values.
    """
    empty_history = []
    return empty_history, len(empty_history), "[]"

def start_conversation(model, messages, max_tokens, temperature, top_p):
    """Send the accumulated history to the selected model and return its reply.

    Issues a single non-streaming chat-completion request against the
    Hugging Face Inference API.

    Args:
        model: Model id understood by the Inference API.
        messages: History as a list of {"role", "content"} dicts.
        max_tokens: Cap on generated tokens.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Returns:
        The assistant's reply text.
    """
    completion = InferenceClient(model).chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=False,
        temperature=temperature,
        top_p=top_p,
    )
    # Non-streaming call: the first (only) choice carries the full reply.
    return completion.choices[0].message.content

def show_messages(messages):
    """Render the message history using Python's default repr formatting."""
    return "{}".format(messages)

def get_messages_api(messages):
    """Serialize the history as pretty-printed JSON for API-style display.

    Args:
        messages: History as a list of {"role", "content"} dicts.

    Returns:
        A 4-space-indented JSON string. `ensure_ascii=False` keeps
        non-ASCII content (e.g. Cyrillic) readable instead of escaping
        it to \\uXXXX sequences in the "Messages API" textbox.
    """
    return json.dumps(messages, indent=4, ensure_ascii=False)

# --- UI definition ----------------------------------------------------------
# NOTE: component creation order below determines the on-screen layout,
# so these statements must not be reordered.
demo = gr.Blocks()

with demo:
    gr.Markdown("# Chat Interface")
    # Inputs for composing a single chat message.
    role_input = gr.Textbox(label="Role")
    content_input = gr.Textbox(label="Content")
    # Server-side conversation history: list of {"role", "content"} dicts.
    messages_state = gr.State(value=[])
    # Read-only views of the history, its length, the model reply, and the
    # JSON serialization produced by get_messages_api.
    messages_output = gr.Textbox(label="Messages", value="[]")
    count_output = gr.Number(label="Count", value=0)
    response_output = gr.Textbox(label="Response")
    messages_api_output = gr.Textbox(label="Messages API")

    add_button = gr.Button("Add")
    clear_button = gr.Button("Clear")
    start_button = gr.Button("Start")
    show_button = gr.Button("Show messages")
    get_api_button = gr.Button("Get messages API")

    # Model picker; defaults to the first entry of models_list.
    model_input = gr.Radio(
        label="Select a model",
        choices=models_list,
        value=models_list[0],
    )

    # Generation hyperparameters forwarded to start_conversation.
    max_tokens_slider = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
    temperature_slider = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
    top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")

    # Append the composed (role, content) message to the history.
    add_button.click(
        add_message,
        inputs=[role_input, content_input, messages_state],
        outputs=[messages_state, count_output, messages_output],
    )

    # Reset history, count, and the textbox rendering.
    clear_button.click(
        clear_messages,
        inputs=[messages_state],
        outputs=[messages_state, count_output, messages_output],
    )

    # Send the history to the selected model and show its reply.
    start_button.click(
        start_conversation,
        inputs=[model_input, messages_state, max_tokens_slider, temperature_slider, top_p_slider],
        outputs=[response_output],
    )

    # Re-render the current history into the Messages textbox.
    show_button.click(
        show_messages,
        inputs=[messages_state],
        outputs=[messages_output],
    )

    # Show the history as pretty-printed JSON.
    get_api_button.click(
        get_messages_api,
        inputs=[messages_state],
        outputs=[messages_api_output],
    )

# Start the Gradio server only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()