Update app.py
app.py CHANGED
```diff
@@ -17,11 +17,11 @@ from CLIP import load as load_clip
 
 # Device configuration
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-big =
+big = False if device == torch.device('cpu') else True
 
 # Parameters
 IMG_SIZE = 1024 if big else 256
-BATCH_SIZE = 1 if big else
+BATCH_SIZE = 1 if big else 1
 EPOCHS = 12
 LR = 0.0002
 dataset_id = "K00B404/pix2pix_flux_set"
@@ -237,8 +237,8 @@ pipeline_tag: image-to-image
 
 ## Model Description
 Custom UNet model for Pix2Pix image translation.
-- **Image Size:**
-- **Model Type:**
+- **Image Size:** {save_dict['model_config']['img_size']}
+- **Model Type:** {"big" if big else "small"}_UNet ({save_dict['model_config']['img_size']})
 
 ## Usage
 
```
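A note on the first hunk: `big = False if device == torch.device('cpu') else True` works, but it reduces to a direct boolean test on the device type. The new `BATCH_SIZE = 1 if big else 1` is also constant: both branches yield 1, so the conditional is currently a no-op. A minimal sketch of an equivalent, more direct parameter block; the small-model batch size of 4 is a hypothetical placeholder, not a value from this commit:

```python
import torch

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Equivalent to `False if device == torch.device('cpu') else True`:
# use the big model only when running on a GPU.
big = device.type == 'cuda'

# Parameters
IMG_SIZE = 1024 if big else 256
# The committed `1 if big else 1` always yields 1; the 4 below is a
# hypothetical small-model batch size, not taken from the diff.
BATCH_SIZE = 1 if big else 4
EPOCHS = 12
LR = 0.0002
dataset_id = "K00B404/pix2pix_flux_set"
```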
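For the second hunk: the added values read like fragments of an f-string that renders the model card, pulling the image size from a `save_dict['model_config']` mapping. A sketch of how those lines might fit together, assuming (beyond what the diff shows) that the card is built as a single f-string and that `save_dict` is assembled alongside the checkpoint:

```python
# Hypothetical reconstruction: only the 'img_size' key, the front-matter
# context line, and the card text near lines 237-244 are visible in the
# diff; everything else here is assumed for illustration.
big = True
IMG_SIZE = 1024 if big else 256
save_dict = {'model_config': {'img_size': IMG_SIZE}}

model_card = f"""---
pipeline_tag: image-to-image
---

## Model Description
Custom UNet model for Pix2Pix image translation.
- **Image Size:** {save_dict['model_config']['img_size']}
- **Model Type:** {"big" if big else "small"}_UNet ({save_dict['model_config']['img_size']})

## Usage
"""
print(model_card)
```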