Metal3d committed on
Commit
3eec860
·
unverified ·
1 Parent(s): 4e320dc
Files changed (1) hide show
  1. main.py +27 -33
main.py CHANGED
@@ -5,30 +5,29 @@ import gradio as gr
5
  import spaces
6
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
7
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  JS = """
9
  () => {
10
  // auto scroll .auto-scroll elements when text has changed
 
11
  const observer = new MutationObserver((mutations) => {
12
- mutations.forEach((mutation) => {
13
- // find the parent element with .auto-scroll class and having the "overflow"
14
- // style attribute to "auto"
15
- let element = mutation.target;
16
- while(element.parentElement !== null && element.parentElement.style.overflow !== "auto") {
17
- element = element.parentElement;
18
- }
19
- if (element.parentElement === null) {
20
- return;
21
- }
22
- element = element.parentElement;
23
- element.scrollTop = element.scrollHeight;
24
- });
25
  })
26
- document.querySelectorAll('.auto-scroll > *').forEach((elem) => {
27
- console.log("observing", elem)
28
- observer.observe(elem, {
29
  childList: true,
30
  characterData: true,
31
- })
32
  });
33
  }
34
  """
@@ -121,17 +120,23 @@ chat_bot = gr.Chatbot(
121
  )
122
 
123
 
124
- with gr.Blocks(js=JS, fill_height=True, title="Reasoning model example") as demo:
 
 
 
 
 
 
125
  reasoning = gr.Markdown(
126
  "# Reasoning\n\nWhen the model will reasoning, its thoughts will be displayed here.",
127
  label="Reasoning",
128
  show_label=True,
129
  container=True,
130
- elem_classes="auto-scroll",
131
  render=False,
132
  )
133
- with gr.Row(equal_height=True, height="90vh"):
134
- with gr.Column(scale=3):
135
  gr.ChatInterface(
136
  chat,
137
  type="messages",
@@ -148,19 +153,8 @@ with gr.Blocks(js=JS, fill_height=True, title="Reasoning model example") as demo
148
  additional_outputs=[reasoning],
149
  )
150
 
151
- with gr.Column(variant="compact", scale=1):
152
  reasoning.render()
153
- gr.Markdown(
154
- "\n\n"
155
- "Note that we are deliberately using a small model with 1.5B parameters. "
156
- "In spite of this, the model's reasoning capability offers highly relevant "
157
- "answer generation. This interface can run on a personal computer if you have 6GB of GPU memory."
158
- "\n\n"
159
- "See [The model page](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B) "
160
- "for more information.",
161
- container=True,
162
- max_height="20%",
163
- )
164
 
165
 
166
  if __name__ == "__main__":
 
5
  import spaces
6
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
7
 
8
+ CSS = """
9
+ .m3d-auto-scroll > * {
10
+ overflow: auto;
11
+ }
12
+
13
+ #reasoning {
14
+ overflow: auto;
15
+ height: calc(100vh - 128px);
16
+ scroll-behavior: smooth;
17
+ }
18
+ """
19
+
20
  JS = """
21
  () => {
22
  // auto scroll .auto-scroll elements when text has changed
23
+ const block = document.querySelector('#reasoning');
24
  const observer = new MutationObserver((mutations) => {
25
+ block.scrollTop = block.scrollHeight;
 
 
 
 
 
 
 
 
 
 
 
 
26
  })
27
+ observer.observe(block, {
 
 
28
  childList: true,
29
  characterData: true,
30
+ subtree: true,
31
  });
32
  }
33
  """
 
120
  )
121
 
122
 
123
+ with gr.Blocks(
124
+ theme="davehornik/Tealy",
125
+ js=JS,
126
+ css=CSS,
127
+ fill_height=True,
128
+ title="Reasoning model example",
129
+ ) as demo:
130
  reasoning = gr.Markdown(
131
  "# Reasoning\n\nWhen the model will reasoning, its thoughts will be displayed here.",
132
  label="Reasoning",
133
  show_label=True,
134
  container=True,
135
+ elem_classes="m3d-auto-scroll",
136
  render=False,
137
  )
138
+ with gr.Row(equal_height=True, variant="panel"):
139
+ with gr.Column(scale=3, variant="compact"):
140
  gr.ChatInterface(
141
  chat,
142
  type="messages",
 
153
  additional_outputs=[reasoning],
154
  )
155
 
156
+ with gr.Column(variant="compact", elem_id="reasoning"):
157
  reasoning.render()
 
 
 
 
 
 
 
 
 
 
 
158
 
159
 
160
  if __name__ == "__main__":