Update app.py
app.py CHANGED
@@ -7,6 +7,23 @@ from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, Autoe
 from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
 from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
 
+import logging
+import sys
+from datetime import datetime
+import gc
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(levelname)s - %(message)s',
+    handlers=[
+        logging.StreamHandler(sys.stdout),
+        logging.FileHandler('transcription.log')
+    ]
+)
+logger = logging.getLogger(__name__)
+
+
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
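The new block routes Python's standard logging to two handlers at once, so every record is written to stdout and appended to transcription.log (datetime and gc are also imported, though the hunks shown here don't use them). A minimal standalone sketch of the same setup; the sample prompt string is illustrative, not from the commit:

```python
import logging
import sys

# Same configuration as the commit: records go to stdout and to a file.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout),         # echo records to stdout
        logging.FileHandler('transcription.log'),  # and append them to a file
    ],
)
logger = logging.getLogger(__name__)

sample_prompt = "a cat wearing a spacesuit"  # illustrative only
logger.info(f"PROMPT: {sample_prompt}")
# emits e.g.: 2024-01-01 12:00:00,000 - INFO - PROMPT: a cat wearing a spacesuit
```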
@@ -74,6 +91,8 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidan
         output_type="pil",
         good_vae=good_vae,
     ):
+        logger.info(f"PROMPT: {prompt}")
+
         yield img, seed
 
 examples = [
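infer is a generator that streams preview images back to the UI, so the prompt is logged once at the start of each request, before the first yield. A hedged sketch of that shape; fake_pipeline and infer_sketch are hypothetical stand-ins, not names from app.py:

```python
import logging

logger = logging.getLogger(__name__)

def fake_pipeline(prompt):
    # hypothetical stand-in for flux_pipe_call_that_returns_an_iterable_of_images
    for step in range(3):
        yield f"preview {step} for {prompt!r}"

def infer_sketch(prompt, seed=42):
    logger.info(f"PROMPT: {prompt}")   # one record per generation request
    for img in fake_pipeline(prompt):  # the streaming preview loop
        yield img, seed                # Gradio re-renders on every yield
```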
@@ -169,7 +188,7 @@ with gr.Blocks(css=css) as demo:
 
     gr.on(
         triggers=[run_button.click, prompt.submit],
-        fn =
+        fn = infer,
         inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
         outputs = [result, seed]
     )
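The last hunk repairs the event wiring: gr.on had been left with an empty fn argument, and the commit binds it to infer so that both clicking the Run button and pressing Enter in the prompt box start generation. A minimal self-contained sketch of the same wiring, assuming Gradio 4.x; echo is a placeholder handler, not the Space's real infer:

```python
import gradio as gr

def echo(prompt):
    # placeholder standing in for the Space's streaming infer generator
    return f"you asked for: {prompt}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    run_button = gr.Button("Run")
    result = gr.Textbox(label="Result")
    gr.on(
        triggers=[run_button.click, prompt.submit],  # button click or Enter key
        fn=echo,  # the commit's fix: bind the handler here (fn = infer in app.py)
        inputs=[prompt],
        outputs=[result],
    )

if __name__ == "__main__":
    demo.launch()
```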