KingNish committed (verified)
Commit 06a1055 · 1 Parent(s): 56bc2e4

Upload app.py

Files changed (1): app.py (+2, -25)
app.py CHANGED
@@ -1,15 +1,8 @@
- import spaces
  import gradio as gr
  import numpy as np
  import os
  import torch
  import random
- import subprocess
- subprocess.run(
-     "pip install flash-attn --no-build-isolation",
-     env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
-     shell=True,
- )
 
  from accelerate import infer_auto_device_map, load_checkpoint_and_dispatch, init_empty_weights
  from PIL import Image
@@ -25,22 +18,9 @@ from modeling.bagel import (
  )
  from modeling.qwen2 import Qwen2Tokenizer
 
- from huggingface_hub import snapshot_download
-
- save_dir = "./model"
- repo_id = "ByteDance-Seed/BAGEL-7B-MoT"
- cache_dir = save_dir + "/cache"
-
- snapshot_download(cache_dir=cache_dir,
-     local_dir=save_dir,
-     repo_id=repo_id,
-     local_dir_use_symlinks=False,
-     resume_download=True,
-     allow_patterns=["*.json", "*.safetensors", "*.bin", "*.py", "*.md", "*.txt"],
- )
 
  # Model Initialization
- model_path = "./model" #Download from https://huggingface.co/ByteDance-Seed/BAGEL-7B-MoT
+ model_path = "/path/to/BAGEL-7B-MoT/weights" #Download from https://huggingface.co/ByteDance-Seed/BAGEL-7B-MoT
 
  llm_config = Qwen2Config.from_json_file(os.path.join(model_path, "llm_config.json"))
  llm_config.qk_norm = True
@@ -320,7 +300,6 @@ with gr.Blocks() as demo:
  )
 
  # Process function based on thinking option and hyperparameters
- @spaces.GPU(duration=90)
  def process_text_to_image(prompt, show_thinking, cfg_text_scale,
                            cfg_interval, timestep_shift,
                            num_timesteps, cfg_renorm_min, cfg_renorm_type,
@@ -412,7 +391,6 @@ with gr.Blocks() as demo:
  )
 
  # Process editing with thinking option and hyperparameters
- @spaces.GPU(duration=90)
  def process_edit_image(image, prompt, show_thinking, cfg_text_scale,
                         cfg_img_scale, cfg_interval,
                         timestep_shift, num_timesteps, cfg_renorm_min,
@@ -466,7 +444,6 @@ with gr.Blocks() as demo:
  img_understand_btn = gr.Button("Submit")
 
  # Process understanding with thinking option and hyperparameters
- @spaces.GPU(duration=90)
  def process_understanding(image, prompt, show_thinking, do_sample,
                            text_temperature, max_new_tokens):
      result = image_understanding(
@@ -525,4 +502,4 @@ with gr.Blocks() as demo:
  </div>
  """)
 
- demo.launch()
+ demo.launch(share=True)
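
Note: with the in-app snapshot_download and the flash-attn install step removed, and model_path now pointing at a placeholder local path, the BAGEL-7B-MoT weights have to be fetched before launching app.py (and flash-attn installed separately, e.g. pip install flash-attn --no-build-isolation with FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE, as in the removed lines). A minimal sketch of the manual download, mirroring the removed snapshot_download call; the "./model" target directory is an assumption, and model_path in app.py should point at whatever directory you choose:

from huggingface_hub import snapshot_download

# Hypothetical local target directory; set model_path in app.py to the same path.
save_dir = "./model"

# Download only the weight/config/doc files, as the removed code did.
snapshot_download(
    repo_id="ByteDance-Seed/BAGEL-7B-MoT",
    local_dir=save_dir,
    allow_patterns=["*.json", "*.safetensors", "*.bin", "*.py", "*.md", "*.txt"],
)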