LimaRaed committed
Commit 600cd01 · verified · 1 Parent(s): 9aac2d6

Update app.py

Files changed (1)
  1. app.py +37 -31
app.py CHANGED
@@ -4,12 +4,11 @@ import os
 import spaces
 import uuid
 
-from diffusers import AnimateDiffPipeline, EulerDiscreteScheduler
+from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
 from diffusers.utils import export_to_video
 from huggingface_hub import hf_hub_download
 from safetensors.torch import load_file
 from PIL import Image
-from transformers import CLIPFeatureExtractor
 
 # Constants
 bases = {
@@ -34,8 +33,12 @@ else:
 pipe = AnimateDiffPipeline.from_pretrained(bases[base_loaded], torch_dtype=dtype).to(device)
 pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
 
+# Safety checkers
+from transformers import CLIPFeatureExtractor
+
 feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
 
+# Function
 @spaces.GPU(duration=30, queue=False)
 def generate_image(prompt, base="Realistic", motion="", step=8, resolution="Square", progress=gr.Progress()):
     global step_loaded
@@ -49,8 +52,6 @@ def generate_image(prompt, base="Realistic", motion="", step=8, resolution="Squa
         width, height = 512, 512
     elif resolution == "Horizontal":
         width, height = 1280, 720
-    else:
-        width, height = 512, 512  # default fallback
 
     if step_loaded != step:
         repo = "ByteDance/AnimateDiff-Lightning"
@@ -70,36 +71,37 @@ def generate_image(prompt, base="Realistic", motion="", step=8, resolution="Squa
     motion_loaded = motion
 
     progress((0, step))
+
     def progress_callback(i, t, z):
         progress((i+1, step))
 
-    output = pipe(
-        prompt=prompt,
-        guidance_scale=1.2,
-        num_inference_steps=step,
-        width=width,
-        height=height,
-        callback=progress_callback,
-        callback_steps=1
-    )
+    output = pipe(prompt=prompt, guidance_scale=1.2, num_inference_steps=step, width=width, height=height, callback=progress_callback, callback_steps=1)
 
     name = str(uuid.uuid4()).replace("-", "")
     path = f"/tmp/{name}.mp4"
     export_to_video(output.frames[0], path, fps=10)
     return path
 
+
 # Gradio Interface
 with gr.Blocks(css="style.css") as demo:
-    gr.HTML("<h1><center>Textual Imagination : A Text To Video Synthesis</center></h1>")
-
+    gr.HTML(
+        "<h1><center>Textual Imagination : A Text To Video Synthesis</center></h1>"
+    )
     with gr.Group():
         with gr.Row():
-            prompt = gr.Textbox(label='Prompt')
-
+            prompt = gr.Textbox(
+                label='Prompt'
+            )
         with gr.Row():
             select_base = gr.Dropdown(
                 label='Base model',
-                choices=["Cartoon", "Realistic", "3d", "Anime"],
+                choices=[
+                    "Cartoon",
+                    "Realistic",
+                    "3d",
+                    "Anime",
+                ],
                 value=base_loaded,
                 interactive=True
            )
@@ -132,12 +134,17 @@ with gr.Blocks(css="style.css") as demo:
            )
            select_resolution = gr.Dropdown(
                label='Resolution',
-                choices=["Square", "Horizontal"],
+                choices=[
+                    "Square",
+                    "Horizontal",
+                ],
                value="Square",
                interactive=True
            )
-        submit = gr.Button(scale=1, variant='primary')
-
+        submit = gr.Button(
+            scale=1,
+            variant='primary'
+        )
        video = gr.Video(
            label='AnimateDiff-Lightning',
            autoplay=True,
@@ -147,15 +154,14 @@ with gr.Blocks(css="style.css") as demo:
        )
 
    gr.on(
-        triggers=[
-            submit.click,
-            prompt.submit
-        ],
-        fn=generate_image,
-        inputs=[prompt, select_base, select_motion, select_step, select_resolution],
-        outputs=[video],
-        queue=False
-    )
-
+        triggers=[
+            submit.click,
+            prompt.submit
+        ],
+        fn=generate_image,
+        inputs=[prompt, select_base, select_motion, select_step, select_resolution],
+        outputs=[video],
+        queue=False
+    )
 
 demo.queue().launch()
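
Note: the hunk starting at @@ -49,8 +52,6 @@ ends right after repo = "ByteDance/AnimateDiff-Lightning", so the step-dependent checkpoint swap itself is not visible in this diff. For reference only, here is a minimal sketch of how the Lightning weights are typically fetched and loaded, following the pattern documented on the ByteDance/AnimateDiff-Lightning model card; the checkpoint filename template and the strict=False UNet load are assumptions about this app, not lines taken from the commit:

# Sketch only: assumed contents of the elided step-loading branch, modeled on the
# ByteDance/AnimateDiff-Lightning model card. This app's actual code may differ.
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

if step_loaded != step:
    repo = "ByteDance/AnimateDiff-Lightning"
    # Lightning ships distilled checkpoints named by step count (1, 2, 4, or 8 steps).
    ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
    # Load the distilled weights into the pipeline's UNet; strict=False because the
    # checkpoint does not cover every UNet parameter.
    pipe.unet.load_state_dict(
        load_file(hf_hub_download(repo, ckpt), device=device),
        strict=False,
    )
    step_loaded = step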
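
Note: the collapsed one-line pipe(...) call keeps the older callback / callback_steps progress hooks. Recent diffusers releases deprecate these two arguments in favour of callback_on_step_end, so if the Space later pins a newer diffusers, an equivalent call would look roughly like the sketch below (same pipe, prompt, step, width, height, and Gradio progress objects as in app.py; only the callback wiring changes):

# Sketch only: progress reporting via the newer diffusers callback API; not part of this commit.
def progress_callback(pipeline, step_index, timestep, callback_kwargs):
    # Report denoising progress back to the Gradio progress bar.
    progress((step_index + 1, step))
    # callback_on_step_end must return the callback_kwargs dict.
    return callback_kwargs

output = pipe(
    prompt=prompt,
    guidance_scale=1.2,
    num_inference_steps=step,
    width=width,
    height=height,
    callback_on_step_end=progress_callback,
)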