import pandas as pd

def load_dataset(file_path):
    """Load Tupuri sentences and static phonetic reference data.

    Parameters
    ----------
    file_path : str
        Path to an Excel (.xlsx) file containing a 'Tupuri' column of
        sentences.

    Returns
    -------
    tuple[list, dict]
        The list of Tupuri sentences and a dict holding the working
        alphabet, IPA consonant/vowel inventories, and numerals.
    """
    # BUG FIX: the original called pd.read_json() even though file_path is
    # an .xlsx file (and the comment said "Excel file"); read it as Excel.
    df = pd.read_excel(file_path)
    tupuri_sentences = df['Tupuri'].tolist()

    # Static phonetic reference data bundled alongside the sentences.
    phonetic_data = {
        "alphabet": [
            'Alpha', 'a', 'b', 'd', 'c', 'e', 'ə', 'f', 'g', 'h', 'i', 'k', 'l',
            'm', 'n', 'ŋ', 'o', 'p', 'q', 'r', 'ɔ', 's', 't', 'u', 'v', 'w', 'y', 'z'
        ],
        "consonants": [
            'p', 'b', 't', 'd', 'ʈ', 'ɖ', 'c', 'ɟ', 'k', 'g', 'q', 'ɢ',
            'ʔ', 'm', 'ɱ', 'n', 'ɳ', 'ɲ', 'ŋ', 'ɴ', 'ʙ', 'r', 'ʀ',
            'ɾ', 'ɽ', 'ɸ', 'β', 'f', 'v', 'θ', 'ð', 's', 'z', 'ʃ',
            'ʒ', 'ʂ', 'ʐ', 'ç', 'ʝ', 'x', 'ɣ', 'χ', 'ʁ', 'ħ', 'ʕ',
            'h', 'ɦ', 'ɬ', 'ɮ', 'ʋ', 'ɹ', 'ɻ', 'j', 'ɰ', 'l', 'ɭ',
            'ʎ', 'ʟ', 'ƥ', 'ɓ', 'ƭ', 'ɗ', 'ƈ', 'ʄ', 'ƙ', 'ɠ', 'ʠ',
            'ʛ'
        ],
        "vowels": [
            'i', 'y', 'ɨ', 'ʉ', 'ɯ', 'u', 'ɪ', 'ʏ', 'ʊ', 'e', 'ø',
            'ɘ', 'ɵ', 'ɤ', 'ə', 'ɛ', 'œ', 'ɜ', 'ɞ', 'ʌ', 'ɔ',
            'æ', 'ɐ', 'a', 'ɶ', 'ɑ', 'ɒ'
        ],
        "numerals": {
            "0": "zəzə",
            "1": "boŋ",
            "2": "ɓog",
            "3": "swa'",
            "4": "Naa",
            "5": "Dwee",
            "6": "hiira",
            "7": "Renam",
            "8": "nenma",
            "9": "kawa'",
            "10": "hwal",
            "11": "hwal ti bon",
            "12": "hwal ti ɓog",
            # BUG FIX: "13" duplicated the entry for "14" ("hwal ti naa").
            # Following this dict's own compositional pattern
            # ("hwal ti " + unit word, cf. 11 and 12), 13 uses the word
            # for 3, "swa'".
            "13": "hwal ti swa'",
            "14": "hwal ti naa",
            "15": "hwal ti dwee",
            "16": "hwal ti hiira",
            "17": "hwal ti renam",
            "18": "hwal ti nenma",
            # NOTE(review): "9" is spelled "kawa'" but "19" uses "kawa"
            # (no apostrophe), and "1" is "boŋ" while "11" uses "bon" —
            # confirm the intended spellings before fixing the data.
            "19": "hwal ti kawa",
            "20": "do ɓoge"
            # Include more numerals here if needed
        }
    }

    return tupuri_sentences, phonetic_data

# Example usage
file_path = "/content/alphabet_and_numbers.xlsx"
tupuri_sentences, phonetic_data = load_dataset(file_path)

# Access the data
print(tupuri_sentences)
print(phonetic_data)
import pandas as pd
import json

# Read the English/Tupuri translation pairs from the JSON dataset into a
# DataFrame. On any failure, leave df as None so downstream cells can
# check for it instead of crashing.
df = None
try:
    with open('english_tupurri_dataset [revisited].json', 'r', encoding='utf-8') as handle:
        df = pd.DataFrame(json.load(handle))
        display(df.head())
except FileNotFoundError:
    print("Error: 'english_tupurri_dataset [revisited].json' not found.")
    df = None
except json.JSONDecodeError:
    print("Error: Invalid JSON format in 'english_tupurri_dataset [revisited].json'.")
    df = None
except Exception as e:
    print(f"An unexpected error occurred: {e}")
    df = None
"execution_count": null, "outputs": [ { "output_type": "display_data", "data": { "text/plain": [ " source \\\n", "0 That which was from the beginning, which we ha... \n", "1 (For the life was manifested, and we have seen... \n", "2 That which we have seen and heard declare we u... \n", "3 And these things write we unto you, that your ... \n", "4 This then is the message which we have heard o... \n", "\n", " target \n", "0 Waçaçre maga hay le tañgu äaa mono, wuur laa n... \n", "1 AÀ naa nen waçaçre se ma kol jar tenen go ne j... \n", "2 Fen maga wuur ko ne, wuur laa waçaçre äe mono,... \n", "3 Wuur yer feçeçre sen wo wo wee maga fruygi naa... \n", "4 Co' wee sug waçaçre maga wuur laan le jag äe m... " ], "text/html": [ "\n", "
\n", " | source | \n", "target | \n", "
---|---|---|
0 | \n", "That which was from the beginning, which we ha... | \n", "Waçaçre maga hay le tañgu äaa mono, wuur laa n... | \n", "
1 | \n", "(For the life was manifested, and we have seen... | \n", "AÀ naa nen waçaçre se ma kol jar tenen go ne j... | \n", "
2 | \n", "That which we have seen and heard declare we u... | \n", "Fen maga wuur ko ne, wuur laa waçaçre äe mono,... | \n", "
3 | \n", "And these things write we unto you, that your ... | \n", "Wuur yer feçeçre sen wo wo wee maga fruygi naa... | \n", "
4 | \n", "This then is the message which we have heard o... | \n", "Co' wee sug waçaçre maga wuur laan le jag äe m... | \n", "