Aleksandr Evteev committed on
Commit
d16f617
·
1 Parent(s): ef906b4
Files changed (2) hide show
  1. app.py +37 -31
  2. requirements.txt +2 -3
app.py CHANGED
@@ -1,43 +1,49 @@
1
  import os
2
- from fastapi import FastAPI, HTTPException
3
- from pydantic import BaseModel
4
  from huggingface_hub import InferenceClient
 
5
 
6
- class GenerateRequest(BaseModel):
7
- description: str
8
-
9
- app = FastAPI()
10
-
11
  HF_TOKEN = os.getenv("HF_HUB_TOKEN")
12
  if not HF_TOKEN:
13
- raise RuntimeError("Не задан HF_HUB_TOKEN")
14
-
15
  client = InferenceClient(token=HF_TOKEN)
16
 
17
- @app.post("/generate-bpmn", response_model=str)
18
- async def generate_bpmn(req: GenerateRequest):
19
  prompt = (
20
  "You are a BPMN 2.0 XML generator.\n"
21
  "Given this plain-text process description:\n"
22
- f"{req.description}\n"
23
  "Output ONLY valid BPMN 2.0 XML, without any extra text."
24
  )
25
- try:
26
- result = client.text_generation(
27
- prompt,
28
- model="ministral/Ministral-3b-instruct",
29
- max_new_tokens=1024,
30
- temperature=0.0,
31
- do_sample=False,
32
- details=True
33
- )
34
- except Exception as e:
35
- raise HTTPException(status_code=500, detail=f"Inference API error: {e}")
36
-
37
- # Теперь result[0] — объект с полем generated_text
38
- text = result[0].generated_text
39
-
40
- if "<definitions" in text:
41
- text = text[text.index("<definitions"):]
42
-
43
- return text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
 
 
2
  from huggingface_hub import InferenceClient
3
+ import gradio as gr
4
 
5
# Configure the Hugging Face Inference client from the Space's environment.
# The token must be provided via the HF_HUB_TOKEN secret in the Space settings.
HF_TOKEN = os.getenv("HF_HUB_TOKEN")
if HF_TOKEN is None or HF_TOKEN == "":
    raise RuntimeError("Не задан HF_HUB_TOKEN в настройках Space")

client = InferenceClient(token=HF_TOKEN)
10
 
11
def _extract_bpmn_xml(text: str) -> str:
    """Return *text* from the first '<definitions' marker onward.

    Models often prepend chatter before the XML; this strips it.
    The text is returned unchanged when the marker is absent.
    """
    idx = text.find("<definitions")
    return text[idx:] if idx != -1 else text


def generate_bpmn(description: str, hf_client=None) -> str:
    """Generate BPMN 2.0 XML for a plain-text process description.

    Calls the HF Inference API with a fixed instruction prompt and trims
    any preamble before the ``<definitions`` element.

    Args:
        description: Plain-text description of the business process.
        hf_client: Optional client override (must expose ``text_generation``);
            defaults to the module-level ``client``. Useful for testing.

    Returns:
        The generated BPMN 2.0 XML as a string.
    """
    active_client = hf_client if hf_client is not None else client
    prompt = (
        "You are a BPMN 2.0 XML generator.\n"
        "Given this plain-text process description:\n"
        f"{description}\n"
        "Output ONLY valid BPMN 2.0 XML, without any extra text."
    )
    # Call the HF Inference API (no FastAPI layer).
    result = active_client.text_generation(
        prompt,
        model="ministral/Ministral-3b-instruct",
        max_new_tokens=1024,
        temperature=0.0,
        do_sample=False,
        details=True
    )
    # BUGFIX: with details=True, text_generation returns a
    # TextGenerationOutput dataclass, not a list — the original
    # ``result[0].generated_text`` raised TypeError. Read the
    # ``generated_text`` attribute directly, falling back to str()
    # for client versions that return a plain string.
    xml = getattr(result, "generated_text", None)
    if xml is None:
        xml = str(result)
    return _extract_bpmn_xml(xml)
32
+
33
# Assemble the Gradio interface: one input textbox, one output textbox,
# and a button wired to generate_bpmn.
with gr.Blocks() as demo:
    gr.Markdown("## Генератор BPMN 2.0 XML на основе текста")
    description_box = gr.Textbox(
        lines=5,
        placeholder="Опиши процесс: «Клиент подаёт заявку, …»",
        label="Описание процесса",
    )
    xml_box = gr.Textbox(lines=15, label="BPMN 2.0 XML")
    generate_btn = gr.Button("Сгенерировать BPMN")
    generate_btn.click(fn=generate_bpmn, inputs=description_box, outputs=xml_box)

# HF Spaces starts this automatically; the port is picked up from the environment.
demo.launch(
    server_name="0.0.0.0",
    server_port=int(os.environ.get("PORT", 7860)),
)
requirements.txt CHANGED
@@ -1,3 +1,2 @@
1
- fastapi
2
- uvicorn[standard]
3
- huggingface-hub>=0.14.1
 
1
+ gradio[oauth]==5.25.2
2
+ huggingface-hub>=0.14.1