Upload README.md with huggingface_hub
README.md

---
pipeline_tag: unconditional-image-generation
---

# Riffusion: Optimized for Mobile Deployment
## State-of-the-art generative AI model used to generate spectrogram images given any text input. These spectrograms can be converted into audio clips

This repository provides scripts to run Riffusion on Qualcomm® devices.
More details on model performance across various devices can be found
[here](https://aihub.qualcomm.com/models/riffusion).

### Model Details

Install the package via pip:

```bash
pip install "qai-hub-models[riffusion]"
```

The package contains a simple end-to-end demo that downloads pre-trained
weights and runs this model on a sample input.

```bash
python -m qai_hub_models.models.riffusion.demo
```

The above demo runs a reference implementation of pre-processing, model
inference, and post-processing.

**NOTE**: If you want to run this in a Jupyter Notebook or Google Colab-like
environment, please add the following to your cell (instead of the above):

```
%run -m qai_hub_models.models.riffusion.demo
```

The export script does the following:
* Accuracy check between PyTorch and on-device outputs.

```bash
python -m qai_hub_models.models.riffusion.export
```
```
Profiling Results
Compute Unit(s) : NPU (4933 ops)
```

## How does this work?

This [export script](https://aihub.qualcomm.com/models/riffusion/qai_hub_models/models/Riffusion/export.py)
leverages [Qualcomm® AI Hub](https://aihub.qualcomm.com/) to optimize, validate, and deploy this model
on-device. Let's go through each step below in detail:

Step 1: **Upload compiled model**

Upload compiled models from `qai_hub_models.models.riffusion` to AI Hub.
```python
import torch

import qai_hub as hub
from qai_hub_models.models.riffusion import Model

# Load the model
model = Model.from_precompiled()
```
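
Riffusion is packaged as three precompiled components (text encoder, UNet, and VAE decoder). The upload step itself is not shown in this card; as a hedged sketch, assuming each component follows the `qai_hub_models` precompiled convention and exposes `get_target_model_path()`, it might look like:

```python
# Hypothetical upload step: get_target_model_path() is assumed from the
# qai_hub_models precompiled-model convention and is not shown in this card.
model_textencoder_quantized = hub.upload_model(model.text_encoder.get_target_model_path())
model_unet_quantized = hub.upload_model(model.unet.get_target_model_path())
model_vaedecoder_quantized = hub.upload_model(model.vae_decoder.get_target_model_path())
```
The uploaded model handles are what the profiling and inference steps below operate on.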

Step 2: **Performance profiling on cloud-hosted device**

Once a profile job is submitted, you can navigate to the
provided job URL to view a variety of on-device performance metrics.
```python
# Device
device = hub.Device("Samsung Galaxy S23")

# Submit a profile job for each uploaded component.
profile_job_textencoder_quantized = hub.submit_profile_job(
    model=model_textencoder_quantized,
    device=device,
)
profile_job_unet_quantized = hub.submit_profile_job(
    model=model_unet_quantized,
    device=device,
)
profile_job_vaedecoder_quantized = hub.submit_profile_job(
    model=model_vaedecoder_quantized,
    device=device,
)
```
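
Submitted jobs can also be polled and inspected from Python. A minimal sketch, assuming the standard `qai_hub` job API (`wait()`, `download_profile()`); the exact keys of the returned profile dict may vary by SDK version:

```python
# Block until the UNet profile job finishes, then fetch its profiling report.
status = profile_job_unet_quantized.wait()
if status.success:
    profile = profile_job_unet_quantized.download_profile()
    # "execution_summary"/"estimated_inference_time" are illustrative key names.
    print(profile["execution_summary"]["estimated_inference_time"])
```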

Step 3: **Verify on-device accuracy**

To verify the accuracy of the model on-device, you can run on-device inference
on sample input data on the same cloud-hosted device.
```python
# Run on-device inference for each component using its sample inputs.
input_data_textencoder_quantized = model.text_encoder.sample_inputs()
inference_job_textencoder_quantized = hub.submit_inference_job(
    model=model_textencoder_quantized,
    device=device,
    inputs=input_data_textencoder_quantized,
)
on_device_output_textencoder_quantized = inference_job_textencoder_quantized.download_output_data()

input_data_unet_quantized = model.unet.sample_inputs()
inference_job_unet_quantized = hub.submit_inference_job(
    model=model_unet_quantized,
    device=device,
    inputs=input_data_unet_quantized,
)
on_device_output_unet_quantized = inference_job_unet_quantized.download_output_data()

input_data_vaedecoder_quantized = model.vae_decoder.sample_inputs()
inference_job_vaedecoder_quantized = hub.submit_inference_job(
    model=model_vaedecoder_quantized,
    device=device,
    inputs=input_data_vaedecoder_quantized,
)
on_device_output_vaedecoder_quantized = inference_job_vaedecoder_quantized.download_output_data()
```
With the output of the model, you can compute metrics like PSNR and relative
error, or spot-check the output against the expected output.
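
For instance, a PSNR check could be as simple as the sketch below. This is an illustrative addition rather than part of the original card: the output key name `"output_0"` and the PyTorch reference tensor are hypothetical.

```python
import numpy as np

def psnr(reference: np.ndarray, test: np.ndarray) -> float:
    """Peak signal-to-noise ratio in dB, using the reference's peak magnitude."""
    mse = float(np.mean((reference.astype(np.float64) - test.astype(np.float64)) ** 2))
    if mse == 0.0:
        return float("inf")
    peak = float(np.max(np.abs(reference)))
    return 10.0 * np.log10(peak**2 / mse)

# Hypothetical usage, comparing the UNet's on-device output against a PyTorch
# reference; "output_0" is an illustrative output name.
# torch_reference = unet_torch_output.detach().numpy()
# device_output = np.array(on_device_output_unet_quantized["output_0"][0])
# print(f"UNet PSNR: {psnr(torch_reference, device_output):.2f} dB")
```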
## View on Qualcomm® AI Hub
Get more details on Riffusion's performance across various devices [here](https://aihub.qualcomm.com/models/riffusion).
Explore all available models on [Qualcomm® AI Hub](https://aihub.qualcomm.com/).