---
license: apache-2.0
language:
- en
pipeline_tag: text-to-speech
---

This is a 4-bit AWQ-quantized version of Orpheus-3b FT. I recommend serving it with lmdeploy, as it is easy to install and very fast.
Below is the code to load the model, process a reference audio file for voice cloning, and generate speech.

Code to load the model:
```python
## Install snac and lmdeploy with: pip install snac lmdeploy

from lmdeploy import pipeline, TurbomindEngineConfig, GenerationConfig
from transformers import AutoTokenizer
from snac import SNAC

tp = 1  ## tensor parallelism; change if you have multiple GPUs
cache_max_entry_count = 0.2  ## fraction of GPU memory reserved for the KV cache

engine_config = TurbomindEngineConfig(model_format='awq', dtype='float16', cache_max_entry_count=cache_max_entry_count, tp=tp, quant_policy=8)
pipe = pipeline("heydryft/Orpheus-3b-FT-AWQ", backend_config=engine_config)
tokeniser = AutoTokenizer.from_pretrained("unsloth/orpheus-3b-0.1-ft-unsloth-bnb-4bit")
snac_model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz").to('cuda:0')
```
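
As a quick sanity check that everything loaded, you can try a plain (non-cloning) generation first. This is a minimal sketch: it assumes the same prompt template used for cloning below also works without reference audio appended, and that `tara` is one of the stock Orpheus voices.
```python
## Smoke test (a sketch): plain TTS with a stock voice, no voice cloning.
## The model should respond with a stream of <custom_token_...> audio tokens.
test_config = GenerationConfig(max_new_tokens=256,
                               temperature=0.4,
                               top_p=0.9,
                               stop_token_ids=[128009, 128001, 49158, 128258],
                               skip_special_tokens=False,
                               do_sample=True)
test_prompt = "<custom_token_3><|begin_of_text|>tara: Hey, can you hear me?<|eot_id|><custom_token_4><custom_token_5>"
out = pipe([test_prompt], gen_config=test_config)
print(out[0].text[:200])
```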

Code to convert a reference voice file into SNAC tokens for voice cloning:
```python
import gc

import librosa
import torch

my_wav_file_is = "test.mp3"  ## path to your reference audio file
and_the_transcript_is = ""  ## transcript of the reference audio

filename = my_wav_file_is

## snac_24khz expects 24 kHz input, so resample on load
audio_array, sample_rate = librosa.load(filename, sr=24000)

def tokenise_audio(waveform):
    waveform = torch.from_numpy(waveform).unsqueeze(0)
    waveform = waveform.to(dtype=torch.float32)
    waveform = waveform.unsqueeze(0).to('cuda:0')

    with torch.inference_mode():
        codes = snac_model.encode(waveform)

    ## Flatten SNAC's three codebooks into one stream of 7 tokens per frame,
    ## shifting each slot into its own 4096-wide range above the base ID 128266.
    all_codes = []
    for i in range(codes[0].shape[1]):
        all_codes.append(codes[0][0][i].item() + 128266)
        all_codes.append(codes[1][0][2*i].item() + 128266 + 4096)
        all_codes.append(codes[2][0][4*i].item() + 128266 + (2*4096))
        all_codes.append(codes[2][0][(4*i)+1].item() + 128266 + (3*4096))
        all_codes.append(codes[1][0][(2*i)+1].item() + 128266 + (4*4096))
        all_codes.append(codes[2][0][(4*i)+2].item() + 128266 + (5*4096))
        all_codes.append(codes[2][0][(4*i)+3].item() + 128266 + (6*4096))

    return all_codes

myts = tokenise_audio(audio_array)  ## the SNAC tokens for the reference clip

gc.collect()
torch.cuda.empty_cache()
```
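
Each frame interleaves one code from SNAC layer 1, two from layer 2, and four from layer 3. As an optional check, you can invert that interleaving and decode the reference clip back through SNAC to confirm the tokenisation round-trips; this sketch only assumes the `myts` and `snac_model` defined above.
```python
## Optional sanity check (a sketch): de-interleave the 7-token frames and
## decode them back to audio with SNAC.
def deinterleave(all_codes):
    l1, l2, l3 = [], [], []
    for i in range(len(all_codes) // 7):
        ## slot k carries offset 128266 + k*4096, mirroring tokenise_audio
        frame = [c - 128266 - k*4096 for k, c in enumerate(all_codes[7*i:7*i+7])]
        l1.append(frame[0])
        l2.extend([frame[1], frame[4]])
        l3.extend([frame[2], frame[3], frame[5], frame[6]])
    return [torch.tensor(l).unsqueeze(0).to('cuda:0') for l in (l1, l2, l3)]

with torch.inference_mode():
    recon = snac_model.decode(deinterleave(myts))
## In a notebook: Audio(recon.detach().squeeze().cpu().numpy(), rate=24000)
```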

Finally, generate speech and play it with IPython:
```python
import gc

import torch
from IPython.display import Audio, display
from lmdeploy import GenerationConfig

## Sampling params are heavily experimental; try tuning them for your use case.
gen_config = GenerationConfig(top_p=0.7,
                              top_k=50,
                              temperature=0.2,
                              max_new_tokens=1024,
                              min_new_tokens=30,
                              stop_token_ids=[128009, 128001, 49158, 128258],
                              repetition_penalty=2.0,
                              skip_special_tokens=False,
                              do_sample=True,
                              min_p=0.6)

prompt = and_the_transcript_is + "<laugh> So um hey, like what's up??"  ## reference transcript followed by the new text to speak

voice_name = "zac"  ## experimental, might be removed
response2 = pipe([f"<custom_token_3><|begin_of_text|>{voice_name}: {prompt}<|eot_id|><custom_token_4><custom_token_5><custom_token_1>" + tokeniser.decode(myts)], gen_config=gen_config)
gc.collect()
torch.cuda.empty_cache()

generated_ids = tokeniser.encode(response2[0].text, return_tensors='pt', add_special_tokens=False)

token_to_find = 128257  ## <custom_token_1>, marks the start of the audio tokens
token_to_remove = 128258  ## <custom_token_2>, end-of-audio padding

## Keep only the tokens after the last start-of-audio marker
token_indices = (generated_ids == token_to_find).nonzero(as_tuple=True)

if len(token_indices[1]) > 0:
    last_occurrence_idx = token_indices[1][-1].item()
    cropped_tensor = generated_ids[:, last_occurrence_idx+1:]
else:
    cropped_tensor = generated_ids

## Drop padding tokens from each row
processed_rows = []
for row in cropped_tensor:
    masked_row = row[row != token_to_remove]
    processed_rows.append(masked_row)

## Trim each row to a whole number of 7-token frames and shift the IDs
## back down to raw SNAC codebook indices
code_lists = []
for row in processed_rows:
    row_length = row.size(0)
    new_length = (row_length // 7) * 7
    trimmed_row = row[:new_length]
    trimmed_row = [t - 128266 for t in trimmed_row]
    code_lists.append(trimmed_row)


## Undo the 7-token interleaving back into SNAC's three codebook layers
def redistribute_codes(code_list):
    layer_1 = []
    layer_2 = []
    layer_3 = []
    for i in range(len(code_list)//7):
        layer_1.append(code_list[7*i])
        layer_2.append(code_list[7*i+1]-4096)
        layer_3.append(code_list[7*i+2]-(2*4096))
        layer_3.append(code_list[7*i+3]-(3*4096))
        layer_2.append(code_list[7*i+4]-(4*4096))
        layer_3.append(code_list[7*i+5]-(5*4096))
        layer_3.append(code_list[7*i+6]-(6*4096))
    codes = [torch.tensor(layer_1).unsqueeze(0).to('cuda:0'),
             torch.tensor(layer_2).unsqueeze(0).to('cuda:0'),
             torch.tensor(layer_3).unsqueeze(0).to('cuda:0')]

    audio_hat = snac_model.decode(codes)
    return audio_hat

my_samples = []
for code_list in code_lists:
    samples = redistribute_codes(code_list)
    my_samples.append(samples)

for samples in my_samples:
    display(Audio(samples.detach().squeeze().to("cpu").numpy(), rate=24000))

del my_samples, samples, code_lists, cropped_tensor, processed_rows
gc.collect()
torch.cuda.empty_cache()
```
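
If you want the clips on disk rather than only in the notebook, write them out before the `del` cleanup at the end of the block above. A minimal sketch using the soundfile package (`pip install soundfile`; the output file names are arbitrary):
```python
## Save each generated clip as a 24 kHz WAV file.
## Run this before my_samples is deleted above.
import soundfile as sf

for i, samples in enumerate(my_samples):
    sf.write(f"output_{i}.wav", samples.detach().squeeze().to("cpu").numpy(), 24000)
```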