"""Pack audio and token files into Parquet batches, or restore them from those batches."""
import argparse
import json
import os

import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from tqdm import tqdm

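# The JSON manifest is assumed to be a list of records that each carry 'id',
# 'speech', and 'units' fields; only the basenames of the two paths are kept.
# Illustrative shape (placeholder values, not from the actual dataset):
#   [{"id": "...", "speech": "<path to audio file>", "units": "<path to token file>"}, ...]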
def pack_to_parquet(json_path, audio_dir, tokens_dir, output_dir, batch_size=1000):
    """Bundle the files listed in the manifest into Parquet files of `batch_size` rows."""
    os.makedirs(output_dir, exist_ok=True)
    with open(json_path, 'r') as f:
        data = json.load(f)

    # File names are stored next to the raw bytes so the originals can be
    # reconstructed later by restore_from_parquet().
    schema = pa.schema([
        ('id', pa.string()),
        ('speech_path', pa.string()),
        ('units_path', pa.string()),
        ('audio_data', pa.binary()),
        ('tokens_data', pa.binary()),
    ])

    records = []
    batch_count = 0

    for item in tqdm(data, desc="Processing records"):
        speech_filename = os.path.basename(item['speech'])
        units_filename = os.path.basename(item['units'])
        audio_path = os.path.join(audio_dir, speech_filename)
        tokens_path = os.path.join(tokens_dir, units_filename)

        # Read the raw bytes; a missing file is stored as a null blob rather
        # than dropping the record.
        audio_data = None
        tokens_data = None
        if os.path.exists(audio_path):
            with open(audio_path, 'rb') as f:
                audio_data = f.read()
        if os.path.exists(tokens_path):
            with open(tokens_path, 'rb') as f:
                tokens_data = f.read()

        records.append({
            'id': item['id'],
            'speech_path': speech_filename,
            'units_path': units_filename,
            'audio_data': audio_data,
            'tokens_data': tokens_data,
        })

        # Flush each full batch to its own Parquet file to bound memory usage.
        if len(records) >= batch_size:
            df = pd.DataFrame(records)
            table = pa.Table.from_pandas(df, schema=schema)
            output_parquet = os.path.join(output_dir, f'batch_{batch_count}.parquet')
            pq.write_table(table, output_parquet)
            print(f"Parquet file saved to: {output_parquet}")
            batch_count += 1
            records = []

    # Write any leftover records that did not fill a complete batch.
    if records:
        df = pd.DataFrame(records)
        table = pa.Table.from_pandas(df, schema=schema)
        output_parquet = os.path.join(output_dir, f'batch_{batch_count}.parquet')
        pq.write_table(table, output_parquet)
        print(f"Parquet file saved to: {output_parquet}")

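# Illustrative helper, not called by the script itself: a quick sanity check
# that prints the schema and row count of one packed batch file.
def inspect_batch(parquet_path):
    table = pq.read_table(parquet_path)
    print(table.schema)
    print(f"rows: {table.num_rows}")
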
def restore_from_parquet(parquet_dir, output_audio_dir, output_tokens_dir):
    """Unpack the audio and token blobs from every Parquet batch back into files."""
    os.makedirs(output_audio_dir, exist_ok=True)
    os.makedirs(output_tokens_dir, exist_ok=True)
    parquet_files = [f for f in os.listdir(parquet_dir) if f.endswith('.parquet')]
    for parquet_file in tqdm(parquet_files, desc="Restoring Parquet files"):
        parquet_path = os.path.join(parquet_dir, parquet_file)
        # pq.read_table() also accepts columns=[...] should a metadata-only
        # pass (skipping the heavy blob columns) ever be needed.
        table = pq.read_table(parquet_path)
        df = table.to_pandas()
        for _, row in df.iterrows():
            # A null blob means the source file was missing at pack time.
            if row['audio_data'] is not None:
                audio_path = os.path.join(output_audio_dir, row['speech_path'])
                with open(audio_path, 'wb') as f:
                    f.write(row['audio_data'])
            if row['tokens_data'] is not None:
                tokens_path = os.path.join(output_tokens_dir, row['units_path'])
                with open(tokens_path, 'wb') as f:
                    f.write(row['tokens_data'])
    print(f"Files restored to: {output_audio_dir} and {output_tokens_dir}")

def main():
    parser = argparse.ArgumentParser(description='Pack or restore audio and token files using Parquet.')
    parser.add_argument('--mode', choices=['pack', 'restore'], required=True,
                        help='Mode to run: "pack" to create Parquet files, "restore" to restore files')
    args = parser.parse_args()

    json_path = 'VoiceAssistant-430K.json'
    audio_dir = 'audios'
    tokens_dir = 'cosyvoice2_tokens'
    output_parquet_dir = 'cosyvoice2_tokens_and_audios_parquet_files'

    if args.mode == 'pack':
        pack_to_parquet(json_path, audio_dir, tokens_dir, output_parquet_dir, batch_size=1000)
    elif args.mode == 'restore':
        # Restore writes back into the same directories the pack step read from.
        restore_from_parquet(output_parquet_dir, audio_dir, tokens_dir)


if __name__ == '__main__':
    main()
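
# Example invocation (the script filename here is illustrative; the data paths
# above are hard-coded relative to the current working directory):
#   python pack_with_parquet.py --mode pack
#   python pack_with_parquet.py --mode restore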