Upload pack_restore_parquet.py with huggingface_hub
Browse files- pack_restore_parquet.py +98 -0
pack_restore_parquet.py
ADDED
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import pyarrow.parquet as pq
|
3 |
+
import pyarrow as pa
|
4 |
+
import os
|
5 |
+
import json
|
6 |
+
import argparse
|
7 |
+
from tqdm import tqdm
|
8 |
+
|
9 |
+
def pack_to_parquet(json_path, audio_dir, tokens_dir, output_dir, batch_size=1000):
    """Bundle audio and token files referenced by a JSON manifest into Parquet batches.

    Each manifest entry must carry 'id', 'speech', and 'units' keys; the
    basenames of 'speech' / 'units' are looked up under *audio_dir* and
    *tokens_dir* and their raw bytes are embedded in the output.  A file
    that is missing on disk is stored as NULL so restore_from_parquet can
    skip it instead of failing.

    Args:
        json_path: Path to the JSON manifest (a list of dicts).
        audio_dir: Directory containing the audio files.
        tokens_dir: Directory containing the token files.
        output_dir: Directory receiving batch_<n>.parquet files (created if absent).
        batch_size: Number of records per Parquet file.
    """
    os.makedirs(output_dir, exist_ok=True)
    # Read the manifest as UTF-8 explicitly so the platform default
    # encoding cannot corrupt non-ASCII file names.
    with open(json_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    schema = pa.schema([
        ('id', pa.string()),
        ('speech_path', pa.string()),
        ('units_path', pa.string()),
        ('audio_data', pa.binary()),
        ('tokens_data', pa.binary())
    ])

    def _read_bytes(path):
        """Return the file's raw bytes, or None if it does not exist."""
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as fh:
            return fh.read()

    def _write_batch(batch_records, batch_index):
        """Flush one batch of records to batch_<batch_index>.parquet."""
        df = pd.DataFrame(batch_records)
        table = pa.Table.from_pandas(df, schema=schema)
        output_parquet = os.path.join(output_dir, f'batch_{batch_index}.parquet')
        pq.write_table(table, output_parquet)
        print(f"Parquet file saved to: {output_parquet}")

    records = []
    batch_count = 0
    for item in tqdm(data, desc="Processing records"):
        speech_filename = os.path.basename(item['speech'])
        units_filename = os.path.basename(item['units'])
        records.append({
            'id': item['id'],
            'speech_path': speech_filename,
            'units_path': units_filename,
            'audio_data': _read_bytes(os.path.join(audio_dir, speech_filename)),
            'tokens_data': _read_bytes(os.path.join(tokens_dir, units_filename)),
        })
        if len(records) >= batch_size:
            _write_batch(records, batch_count)
            batch_count += 1
            records = []

    # Flush the final partial batch, if any.
    if records:
        _write_batch(records, batch_count)
62 |
+
|
63 |
+
def restore_from_parquet(parquet_dir, output_audio_dir, output_tokens_dir):
    """Unpack every *.parquet batch in *parquet_dir* back into individual files.

    Writes each row's embedded audio bytes under *output_audio_dir* (named
    by the 'speech_path' column) and its token bytes under
    *output_tokens_dir* (named by 'units_path').  A NULL blob means the
    file was missing at pack time and is skipped for that column.
    """
    os.makedirs(output_audio_dir, exist_ok=True)
    os.makedirs(output_tokens_dir, exist_ok=True)

    parquet_files = [name for name in os.listdir(parquet_dir) if name.endswith('.parquet')]
    for name in tqdm(parquet_files, desc="Restoring Parquet files"):
        frame = pq.read_table(os.path.join(parquet_dir, name)).to_pandas()
        for _, row in frame.iterrows():
            audio_blob = row['audio_data']
            if audio_blob is not None:
                target = os.path.join(output_audio_dir, row['speech_path'])
                with open(target, 'wb') as out:
                    out.write(audio_blob)
            token_blob = row['tokens_data']
            if token_blob is not None:
                target = os.path.join(output_tokens_dir, row['units_path'])
                with open(target, 'wb') as out:
                    out.write(token_blob)
    print(f"Files restored to: {output_audio_dir} and {output_tokens_dir}")
81 |
+
|
82 |
+
def main():
    """CLI entry point: pack files into Parquet batches, or restore them.

    Usage:
        python pack_restore_parquet.py --mode pack
        python pack_restore_parquet.py --mode restore

    All path arguments default to the previously hard-coded layout, so the
    original invocations keep working unchanged.
    """
    parser = argparse.ArgumentParser(description='Pack or restore audio and token files using Parquet.')
    parser.add_argument('--mode', choices=['pack', 'restore'], required=True,
                        help='Mode to run: "pack" to create Parquet files, "restore" to restore files')
    # Generalization: the formerly hard-coded paths are now overridable
    # CLI options whose defaults match the original constants.
    parser.add_argument('--json-path', default='VoiceAssistant-430K.json',
                        help='JSON manifest listing the records to pack')
    parser.add_argument('--audio-dir', default='audios',
                        help='Directory holding (or receiving) the audio files')
    parser.add_argument('--tokens-dir', default='cosyvoice2_tokens',
                        help='Directory holding (or receiving) the token files')
    parser.add_argument('--parquet-dir', default='cosyvoice2_tokens_and_audios_parquet_files',
                        help='Directory holding (or receiving) the Parquet batch files')
    parser.add_argument('--batch-size', type=int, default=1000,
                        help='Records per Parquet file when packing')
    args = parser.parse_args()

    if args.mode == 'pack':
        # python pack_restore_parquet.py --mode pack
        pack_to_parquet(args.json_path, args.audio_dir, args.tokens_dir,
                        args.parquet_dir, batch_size=args.batch_size)
    elif args.mode == 'restore':
        # python pack_restore_parquet.py --mode restore
        restore_from_parquet(args.parquet_dir, args.audio_dir, args.tokens_dir)
97 |
+
# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()