{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "fed859ca", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T08:42:53.237199Z", "iopub.status.busy": "2025-03-25T08:42:53.237094Z", "iopub.status.idle": "2025-03-25T08:42:53.397659Z", "shell.execute_reply": "2025-03-25T08:42:53.397337Z" } }, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"Endometrioid_Cancer\"\n", "cohort = \"GSE73614\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/Endometrioid_Cancer\"\n", "in_cohort_dir = \"../../input/GEO/Endometrioid_Cancer/GSE73614\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/Endometrioid_Cancer/GSE73614.csv\"\n", "out_gene_data_file = \"../../output/preprocess/Endometrioid_Cancer/gene_data/GSE73614.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/Endometrioid_Cancer/clinical_data/GSE73614.csv\"\n", "json_path = \"../../output/preprocess/Endometrioid_Cancer/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "203674fa", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": 2, "id": "b7d35553", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T08:42:53.399130Z", "iopub.status.busy": "2025-03-25T08:42:53.398984Z", "iopub.status.idle": "2025-03-25T08:42:53.589635Z", "shell.execute_reply": "2025-03-25T08:42:53.589303Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Background Information:\n", "!Series_title\t\"Can High Grade Serous Ovarian Cancer TCGA Gene Expression Signatures be seen in High Grade Endometrioid or Clear Cell Ovarian Cancer?\"\n", "!Series_summary\t\"The goal of the study was to examine the transcriptional profile of ovarian cancer cancers in order to develop validated clinically useful prognostic signatures with the potential to guide therapy decisions. Fresh frozen samples were prospectively collected from a series of 107 consecutive women with high-grade serous ovarian, primary peritonial, or fallopian tube cancer as well as high grade clear cell and endometrioid cancer who underwent surgery by a gynecologic oncologist at Mayo Clinic between 1994 and 2005. All patients received postoperative chemotherapy with a platinum agent, and 75% received a taxane. All patients signed an Institutional Review Board approved consent for bio-banking, clinical data extraction and molecular analysis. Median follow-up time was 35 months (range, 1-202 months). Fourteen patients (8%) were included in the TCGA study.\"\n", "!Series_overall_design\t\"High grade serous, clear cell and endometrioid ovarian tumors (n=107) were compared to a reference pool of 106 ovarian samples. Mixed reference includes normal, benign, borderline, and malignant sample of various histolgies.\"\n", "Sample Characteristics Dictionary:\n", "{0: ['tissue: ovarian']}\n" ] } ], "source": [ "from tools.preprocess import *\n", "# 1. Identify the paths to the SOFT file and the matrix file\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# 2. 
Read the matrix file to obtain background information and sample characteristics data\n", "background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", "clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", "background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", "\n", "# 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", "sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", "\n", "# 4. Explicitly print out all the background information and the sample characteristics dictionary\n", "print(\"Background Information:\")\n", "print(background_info)\n", "print(\"Sample Characteristics Dictionary:\")\n", "print(sample_characteristics_dict)\n" ] }, { "cell_type": "markdown", "id": "2a6138ed", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": 3, "id": "072ca9ae", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T08:42:53.590949Z", "iopub.status.busy": "2025-03-25T08:42:53.590841Z", "iopub.status.idle": "2025-03-25T08:42:53.598586Z", "shell.execute_reply": "2025-03-25T08:42:53.598335Z" } }, "outputs": [ { "data": { "text/plain": [ "False" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import re\n", "import pandas as pd\n", "import numpy as np\n", "import json\n", "import os\n", "from typing import Optional, Callable, Dict, Any, List, Union, Tuple\n", "\n", "# 1. Gene Expression Data Availability\n", "# Based on the background information, this dataset contains transcriptional profiles\n", "# of ovarian cancers, suggesting gene expression data is available\n", "is_gene_available = True\n", "\n", "# 2. 
Variable Availability and Data Type Conversion\n", "# From the sample characteristics dictionary, we only have 'tissue: ovarian'\n", "# and no information about the trait (Endometrioid_Cancer), age, or gender\n", "\n", "# 2.1 Data Availability\n", "# The sample characteristics don't explicitly mention Endometrioid Cancer\n", "# However, the background info mentions \"high grade clear cell and endometrioid cancer\"\n", "# Since the dataset includes endometrioid cancer samples, we can attempt to identify these from other data\n", "# But given the current information, we don't have a specific row that indicates trait status\n", "trait_row = None\n", "age_row = None\n", "gender_row = None\n", "\n", "# 2.2 Data Type Conversion\n", "# Even though we don't have direct trait information, we'll define conversion functions\n", "# in case we can infer trait status from other data later\n", "def convert_trait(value):\n", " if not value or pd.isna(value):\n", " return None\n", " \n", " # Extract value after colon if present\n", " if isinstance(value, str) and ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " # Check for endometrioid cancer indicators\n", " lower_value = str(value).lower()\n", " if 'endometrioid' in lower_value:\n", " return 1\n", " elif 'not endometrioid' in lower_value or 'serous' in lower_value or 'clear cell' in lower_value:\n", " return 0\n", " else:\n", " return None\n", "\n", "def convert_age(value):\n", " if not value or pd.isna(value):\n", " return None\n", " \n", " # Extract value after colon if present\n", " if isinstance(value, str) and ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " # Try to extract age as a number\n", " try:\n", " # Extract numbers from the string\n", " numbers = re.findall(r'\\d+', str(value))\n", " if numbers:\n", " return float(numbers[0])\n", " except:\n", " pass\n", " \n", " return None\n", "\n", "def convert_gender(value):\n", " if not value or pd.isna(value):\n", " return None\n", " \n", " # Extract value after colon if present\n", " if isinstance(value, str) and ':' in value:\n", " value = value.split(':', 1)[1].strip()\n", " \n", " # Convert gender values to binary (0 for female, 1 for male)\n", " lower_value = str(value).lower()\n", " if 'female' in lower_value or 'f' == lower_value:\n", " return 0\n", " elif 'male' in lower_value or 'm' == lower_value:\n", " return 1\n", " else:\n", " return None\n", "\n", "# 3. Save Metadata\n", "# Since trait_row is None, is_trait_available is False\n", "is_trait_available = trait_row is not None\n", "validate_and_save_cohort_info(\n", " is_final=False,\n", " cohort=cohort,\n", " info_path=json_path,\n", " is_gene_available=is_gene_available,\n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. 
Clinical Feature Extraction\n", "# Since trait_row is None, we skip this substep\n" ] }, { "cell_type": "markdown", "id": "b167255e", "metadata": {}, "source": [ "### Step 3: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": 4, "id": "88e6055c", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T08:42:53.599797Z", "iopub.status.busy": "2025-03-25T08:42:53.599692Z", "iopub.status.idle": "2025-03-25T08:42:53.983492Z", "shell.execute_reply": "2025-03-25T08:42:53.983098Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found data marker at line 88\n", "Header line: \"ID_REF\"\t\"GSM1899433\"\t\"GSM1899434\"\t\"GSM1899435\"\t\"GSM1899436\"\t\"GSM1899437\"\t\"GSM1899438\"\t\"GSM1899439\"\t\"GSM1899440\"\t\"GSM1899441\"\t\"GSM1899442\"\t\"GSM1899443\"\t\"GSM1899444\"\t\"GSM1899445\"\t\"GSM1899446\"\t\"GSM1899447\"\t\"GSM1899448\"\t\"GSM1899449\"\t\"GSM1899450\"\t\"GSM1899451\"\t\"GSM1899452\"\t\"GSM1899453\"\t\"GSM1899454\"\t\"GSM1899455\"\t\"GSM1899456\"\t\"GSM1899457\"\t\"GSM1899458\"\t\"GSM1899459\"\t\"GSM1899460\"\t\"GSM1899461\"\t\"GSM1899462\"\t\"GSM1899463\"\t\"GSM1899464\"\t\"GSM1899465\"\t\"GSM1899466\"\t\"GSM1899467\"\t\"GSM1899468\"\t\"GSM1899469\"\t\"GSM1899470\"\t\"GSM1899471\"\t\"GSM1899472\"\t\"GSM1899473\"\t\"GSM1899474\"\t\"GSM1899475\"\t\"GSM1899476\"\t\"GSM1899477\"\t\"GSM1899478\"\t\"GSM1899479\"\t\"GSM1899480\"\t\"GSM1899481\"\t\"GSM1899482\"\t\"GSM1899483\"\t\"GSM1899484\"\t\"GSM1899485\"\t\"GSM1899486\"\t\"GSM1899487\"\t\"GSM1899488\"\t\"GSM1899489\"\t\"GSM1899490\"\t\"GSM1899491\"\t\"GSM1899492\"\t\"GSM1899493\"\t\"GSM1899494\"\t\"GSM1899495\"\t\"GSM1899496\"\t\"GSM1899497\"\t\"GSM1899498\"\t\"GSM1899499\"\t\"GSM1899500\"\t\"GSM1899501\"\t\"GSM1899502\"\t\"GSM1899503\"\t\"GSM1899504\"\t\"GSM1899505\"\t\"GSM1899506\"\t\"GSM1899507\"\t\"GSM1899508\"\t\"GSM1899509\"\t\"GSM1899510\"\t\"GSM1899511\"\t\"GSM1899512\"\t\"GSM1899513\"\t\"GSM1899514\"\t\"GSM1899515\"\t\"GSM1899516\"\t\"GSM1899517\"\t\"GSM1899518\"\t\"GSM1899519\"\t\"GSM1899520\"\t\"GSM1899521\"\t\"GSM1899522\"\t\"GSM1899523\"\t\"GSM1899524\"\t\"GSM1899525\"\t\"GSM1899526\"\t\"GSM1899527\"\t\"GSM1899528\"\t\"GSM1899529\"\t\"GSM1899530\"\t\"GSM1899531\"\t\"GSM1899532\"\t\"GSM1899533\"\t\"GSM1899534\"\t\"GSM1899535\"\t\"GSM1899536\"\t\"GSM1899537\"\t\"GSM1899538\"\t\"GSM1899539\"\n", "First data line: \"A_23_P100001\"\t0.22837\t0.16814\t-0.15777\t-0.03722\t0.43297\t0.18792\t-0.20941\t0.40694\t-0.19426\t0.48843\t-0.43983\t0.2141\t0.23937\t-0.155\t0.10504\t0.14679\t-0.04914\t0.00052\t0.06773\t0.44417\t0.02696\t0.25367\t0.1072\t-0.22499\t0.21364\t0.03322\t0.15542\t0.10693\t-0.16495\t-0.2452\t0.26624\t-0.15363\t0.2427\t0.22664\t0.16845\t-0.12428\t0.00128\t-0.15964\t0.09298\t-0.10324\t0.0565\t0.39694\t0.13636\t0.02991\t-0.00885\t0.21583\t0.4635\t0.18625\t-0.12811\t-0.1385\t0.37436\t0.03717\t0.22219\t0.03689\t-0.12896\t-0.3802\t0.15872\t0.02401\t0.07181\t0.04799\t0.42576\t0.35135\t0.24541\t-0.07618\t0.30498\t-0.02031\t0.28361\t-0.27325\t0.27612\t0.54265\t0.24507\t0.31329\t0.0754\t0.0595\t-0.19697\t-0.24974\t0.00898\t0.35472\t-0.36816\t-0.25968\t-0.12905\t0.23938\t-0.04935\t0.14433\t-0.18894\t-0.07497\t-0.39995\t0.13942\t0.17656\t0.34625\t0.21088\t-0.03354\t-0.11384\t0.1219\t0.11367\t-0.26274\t-0.37531\t-0.11861\t-0.17624\t-0.15718\t0.57526\t0.09523\t0.56421\t0.09067\t-0.43787\t-0.41249\t0.15347\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Index(['A_23_P100001', 'A_23_P100011', 'A_23_P100022', 'A_23_P100056',\n", " 'A_23_P100074', 'A_23_P100092', 
'A_23_P100103', 'A_23_P100111',\n", " 'A_23_P100127', 'A_23_P100133', 'A_23_P100141', 'A_23_P100156',\n", " 'A_23_P100177', 'A_23_P100189', 'A_23_P100196', 'A_23_P100203',\n", " 'A_23_P100220', 'A_23_P100240', 'A_23_P10025', 'A_23_P100263'],\n", " dtype='object', name='ID')\n" ] } ], "source": [ "# 1. Get the file paths for the SOFT file and matrix file\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# 2. First, let's examine the structure of the matrix file to understand its format\n", "import gzip\n", "\n", "# Peek at the first few lines of the file to understand its structure\n", "with gzip.open(matrix_file, 'rt') as file:\n", " # Read first 100 lines to find the header structure\n", " for i, line in enumerate(file):\n", " if '!series_matrix_table_begin' in line:\n", " print(f\"Found data marker at line {i}\")\n", " # Read the next line which should be the header\n", " header_line = next(file)\n", " print(f\"Header line: {header_line.strip()}\")\n", " # And the first data line\n", " first_data_line = next(file)\n", " print(f\"First data line: {first_data_line.strip()}\")\n", " break\n", " if i > 100: # Limit search to first 100 lines\n", " print(\"Matrix table marker not found in first 100 lines\")\n", " break\n", "\n", "# 3. Now try to get the genetic data with better error handling\n", "try:\n", " gene_data = get_genetic_data(matrix_file)\n", " print(gene_data.index[:20])\n", "except KeyError as e:\n", " print(f\"KeyError: {e}\")\n", " \n", " # Alternative approach: manually extract the data\n", " print(\"\\nTrying alternative approach to read the gene data:\")\n", " with gzip.open(matrix_file, 'rt') as file:\n", " # Find the start of the data\n", " for line in file:\n", " if '!series_matrix_table_begin' in line:\n", " break\n", " \n", " # Read the headers and data\n", " import pandas as pd\n", " df = pd.read_csv(file, sep='\\t', index_col=0)\n", " print(f\"Column names: {df.columns[:5]}\")\n", " print(f\"First 20 row IDs: {df.index[:20]}\")\n", " gene_data = df\n" ] }, { "cell_type": "markdown", "id": "5069d758", "metadata": {}, "source": [ "### Step 4: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": 5, "id": "689580bf", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T08:42:53.985113Z", "iopub.status.busy": "2025-03-25T08:42:53.984983Z", "iopub.status.idle": "2025-03-25T08:42:53.987114Z", "shell.execute_reply": "2025-03-25T08:42:53.986798Z" } }, "outputs": [], "source": [ "# Looking at the gene identifiers from the previous step, these are Agilent microarray probe identifiers\n", "# (format A_23_P######) rather than human gene symbols.\n", "# These will need to be mapped to gene symbols for biological interpretation.\n", "\n", "requires_gene_mapping = True\n" ] }, { "cell_type": "markdown", "id": "49fb7d36", "metadata": {}, "source": [ "### Step 5: Gene Annotation" ] }, { "cell_type": "code", "execution_count": 6, "id": "a5013799", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T08:42:53.988314Z", "iopub.status.busy": "2025-03-25T08:42:53.988207Z", "iopub.status.idle": "2025-03-25T08:42:54.360586Z", "shell.execute_reply": "2025-03-25T08:42:54.359951Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Examining SOFT file structure:\n", "Line 0: ^DATABASE = GeoMiame\n", "Line 1: !Database_name = Gene Expression Omnibus (GEO)\n", "Line 2: !Database_institute = NCBI NLM NIH\n", "Line 3: !Database_web_link = http://www.ncbi.nlm.nih.gov/geo\n", "Line 4: !Database_email = 
geo@ncbi.nlm.nih.gov\n", "Line 5: ^SERIES = GSE73614\n", "Line 6: !Series_title = Can High Grade Serous Ovarian Cancer TCGA Gene Expression Signatures be seen in High Grade Endometrioid or Clear Cell Ovarian Cancer?\n", "Line 7: !Series_geo_accession = GSE73614\n", "Line 8: !Series_status = Public on Oct 01 2015\n", "Line 9: !Series_submission_date = Sep 30 2015\n", "Line 10: !Series_last_update_date = Oct 07 2019\n", "Line 11: !Series_pubmed_id = 27016234\n", "Line 12: !Series_summary = The goal of the study was to examine the transcriptional profile of ovarian cancer cancers in order to develop validated clinically useful prognostic signatures with the potential to guide therapy decisions. Fresh frozen samples were prospectively collected from a series of 107 consecutive women with high-grade serous ovarian, primary peritonial, or fallopian tube cancer as well as high grade clear cell and endometrioid cancer who underwent surgery by a gynecologic oncologist at Mayo Clinic between 1994 and 2005. All patients received postoperative chemotherapy with a platinum agent, and 75% received a taxane. All patients signed an Institutional Review Board approved consent for bio-banking, clinical data extraction and molecular analysis. Median follow-up time was 35 months (range, 1-202 months). Fourteen patients (8%) were included in the TCGA study.\n", "Line 13: !Series_overall_design = High grade serous, clear cell and endometrioid ovarian tumors (n=107) were compared to a reference pool of 106 ovarian samples. Mixed reference includes normal, benign, borderline, and malignant sample of various histolgies.\n", "Line 14: !Series_type = Expression profiling by array\n", "Line 15: !Series_contributor = Habib,,Hamidi\n", "Line 16: !Series_contributor = Boris,,Winterhoff\n", "Line 17: !Series_contributor = Chen,,Wang\n", "Line 18: !Series_contributor = Kimberly,,Kalli\n", "Line 19: !Series_contributor = Brooke,,Fridley\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "Gene annotation preview:\n", "{'ID': ['A_23_P100001', 'A_23_P100011', 'A_23_P100022', 'A_23_P100056', 'A_23_P100074'], 'SPOT_ID': ['A_23_P100001', 'A_23_P100011', 'A_23_P100022', 'A_23_P100056', 'A_23_P100074'], 'CONTROL_TYPE': ['FALSE', 'FALSE', 'FALSE', 'FALSE', 'FALSE'], 'REFSEQ': ['NM_207446', 'NM_005829', 'NM_014848', 'NM_194272', 'NM_020371'], 'GB_ACC': ['NM_207446', 'NM_005829', 'NM_014848', 'NM_194272', 'NM_020371'], 'GENE': [400451.0, 10239.0, 9899.0, 348093.0, 57099.0], 'GENE_SYMBOL': ['FAM174B', 'AP3S2', 'SV2B', 'RBPMS2', 'AVEN'], 'GENE_NAME': ['family with sequence similarity 174, member B', 'adaptor-related protein complex 3, sigma 2 subunit', 'synaptic vesicle glycoprotein 2B', 'RNA binding protein with multiple splicing 2', 'apoptosis, caspase activation inhibitor'], 'UNIGENE_ID': ['Hs.27373', 'Hs.632161', 'Hs.21754', 'Hs.436518', 'Hs.555966'], 'ENSEMBL_ID': ['ENST00000557398', nan, 'ENST00000557410', 'ENST00000300069', 'ENST00000306730'], 'TIGR_ID': [nan, nan, nan, nan, nan], 'ACCESSION_STRING': ['ref|NM_207446|ens|ENST00000557398|ens|ENST00000553393|ens|ENST00000327355', 'ref|NM_005829|ref|NM_001199058|ref|NR_023361|ref|NR_037582', 'ref|NM_014848|ref|NM_001167580|ens|ENST00000557410|ens|ENST00000330276', 'ref|NM_194272|ens|ENST00000300069|gb|AK127873|gb|AK124123', 'ref|NM_020371|ens|ENST00000306730|gb|AF283508|gb|BC010488'], 'CHROMOSOMAL_LOCATION': ['chr15:93160848-93160789', 'chr15:90378743-90378684', 'chr15:91838329-91838388', 'chr15:65032375-65032316', 'chr15:34158739-34158680'], 'CYTOBAND': 
['hs|15q26.1', 'hs|15q26.1', 'hs|15q26.1', 'hs|15q22.31', 'hs|15q14'], 'DESCRIPTION': ['Homo sapiens family with sequence similarity 174, member B (FAM174B), mRNA [NM_207446]', 'Homo sapiens adaptor-related protein complex 3, sigma 2 subunit (AP3S2), transcript variant 1, mRNA [NM_005829]', 'Homo sapiens synaptic vesicle glycoprotein 2B (SV2B), transcript variant 1, mRNA [NM_014848]', 'Homo sapiens RNA binding protein with multiple splicing 2 (RBPMS2), mRNA [NM_194272]', 'Homo sapiens apoptosis, caspase activation inhibitor (AVEN), mRNA [NM_020371]'], 'GO_ID': ['GO:0016020(membrane)|GO:0016021(integral to membrane)', 'GO:0005794(Golgi apparatus)|GO:0006886(intracellular protein transport)|GO:0008565(protein transporter activity)|GO:0016020(membrane)|GO:0016192(vesicle-mediated transport)|GO:0030117(membrane coat)|GO:0030659(cytoplasmic vesicle membrane)|GO:0031410(cytoplasmic vesicle)', 'GO:0001669(acrosomal vesicle)|GO:0006836(neurotransmitter transport)|GO:0016020(membrane)|GO:0016021(integral to membrane)|GO:0022857(transmembrane transporter activity)|GO:0030054(cell junction)|GO:0030672(synaptic vesicle membrane)|GO:0031410(cytoplasmic vesicle)|GO:0045202(synapse)', 'GO:0000166(nucleotide binding)|GO:0003676(nucleic acid binding)', 'GO:0005515(protein binding)|GO:0005622(intracellular)|GO:0005624(membrane fraction)|GO:0006915(apoptosis)|GO:0006916(anti-apoptosis)|GO:0012505(endomembrane system)|GO:0016020(membrane)'], 'SEQUENCE': ['ATCTCATGGAAAAGCTGGATTCCTCTGCCTTACGCAGAAACACCCGGGCTCCATCTGCCA', 'TCAAGTATTGGCCTGACATAGAGTCCTTAAGACAAGCAAAGACAAGCAAGGCAAGCACGT', 'ATGTCGGCTGTGGAGGGTTAAAGGGATGAGGCTTTCCTTTGTTTAGCAAATCTGTTCACA', 'CCCTGTCAGATAAGTTTAATGTTTAGTTTGAGGCATGAAGAAGAAAAGGGTTTCCATTCT', 'GACCAGCCAGTTTACAAGCATGTCTCAAGCTAGTGTGTTCCATTATGCTCACAGCAGTAA']}\n" ] } ], "source": [ "# 1. Let's first examine the structure of the SOFT file before trying to parse it\n", "import gzip\n", "\n", "# Look at the first few lines of the SOFT file to understand its structure\n", "print(\"Examining SOFT file structure:\")\n", "try:\n", " with gzip.open(soft_file, 'rt') as file:\n", " # Read first 20 lines to understand the file structure\n", " for i, line in enumerate(file):\n", " if i < 20:\n", " print(f\"Line {i}: {line.strip()}\")\n", " else:\n", " break\n", "except Exception as e:\n", " print(f\"Error reading SOFT file: {e}\")\n", "\n", "# 2. 
Now let's try a more robust approach to extract the gene annotation\n", "# Instead of using the library function which failed, we'll implement a custom approach\n", "try:\n", " # First, look for the platform section which contains gene annotation\n", " platform_data = []\n", " with gzip.open(soft_file, 'rt') as file:\n", " in_platform_section = False\n", " for line in file:\n", " if line.startswith('^PLATFORM'):\n", " in_platform_section = True\n", " continue\n", " if in_platform_section and line.startswith('!platform_table_begin'):\n", " # Next line should be the header\n", " header = next(file).strip()\n", " platform_data.append(header)\n", " # Read until the end of the platform table\n", " for table_line in file:\n", " if table_line.startswith('!platform_table_end'):\n", " break\n", " platform_data.append(table_line.strip())\n", " break\n", " \n", " # If we found platform data, convert it to a DataFrame\n", " if platform_data:\n", " import pandas as pd\n", " import io\n", " platform_text = '\\n'.join(platform_data)\n", " gene_annotation = pd.read_csv(io.StringIO(platform_text), delimiter='\\t', \n", " low_memory=False, on_bad_lines='skip')\n", " print(\"\\nGene annotation preview:\")\n", " print(preview_df(gene_annotation))\n", " else:\n", " print(\"Could not find platform table in SOFT file\")\n", " \n", " # Try an alternative approach - extract mapping from other sections\n", " with gzip.open(soft_file, 'rt') as file:\n", " for line in file:\n", " if 'ANNOTATION information' in line or 'annotation information' in line:\n", " print(f\"Found annotation information: {line.strip()}\")\n", " if line.startswith('!Platform_title') or line.startswith('!platform_title'):\n", " print(f\"Platform title: {line.strip()}\")\n", " \n", "except Exception as e:\n", " print(f\"Error processing gene annotation: {e}\")\n" ] }, { "cell_type": "markdown", "id": "f9375b22", "metadata": {}, "source": [ "### Step 6: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": 7, "id": "e5a3dc8c", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T08:42:54.362411Z", "iopub.status.busy": "2025-03-25T08:42:54.362297Z", "iopub.status.idle": "2025-03-25T08:42:54.501877Z", "shell.execute_reply": "2025-03-25T08:42:54.501254Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Gene expression data after mapping to gene symbols:\n", "Shape: (18488, 107)\n", "First few gene symbols: Index(['A1BG', 'A1BG-AS1', 'A1CF', 'A2LD1', 'A2M', 'A2ML1', 'A4GALT', 'A4GNT',\n", " 'AAAS', 'AACS'],\n", " dtype='object', name='Gene')\n" ] } ], "source": [ "# 1. Identify the relevant columns for mapping\n", "# From the gene_annotation preview, we can see:\n", "# - 'ID' column contains probe identifiers (A_23_P######) which match the gene expression data's indices\n", "# - 'GENE_SYMBOL' column contains the human gene symbols we want to map to\n", "\n", "# 2. Create the gene mapping dataframe\n", "gene_mapping = get_gene_mapping(gene_annotation, prob_col='ID', gene_col='GENE_SYMBOL')\n", "\n", "# 3. 
Apply gene mapping to convert probe-level measurements to gene expression\n", "# This handles the many-to-many mapping by dividing expression values when one probe maps\n", "# to multiple genes, then summing all probe contributions for each gene\n", "gene_data = apply_gene_mapping(gene_data, gene_mapping)\n", "\n", "# Preview the gene expression data after mapping\n", "print(\"Gene expression data after mapping to gene symbols:\")\n", "print(f\"Shape: {gene_data.shape}\")\n", "print(f\"First few gene symbols: {gene_data.index[:10]}\")\n" ] }, { "cell_type": "markdown", "id": "f2516d28", "metadata": {}, "source": [ "### Step 7: Data Normalization and Linking" ] }, { "cell_type": "code", "execution_count": 8, "id": "9f3a6e24", "metadata": { "execution": { "iopub.execute_input": "2025-03-25T08:42:54.503762Z", "iopub.status.busy": "2025-03-25T08:42:54.503644Z", "iopub.status.idle": "2025-03-25T08:42:55.615715Z", "shell.execute_reply": "2025-03-25T08:42:55.615102Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Normalized gene data shape: (18247, 107)\n", "First few genes with their expression values after normalization:\n", " GSM1899433 GSM1899434 GSM1899435 GSM1899436 GSM1899437 \\\n", "Gene \n", "A1BG -0.38515 0.02804 0.22406 0.06810 -0.41163 \n", "A1BG-AS1 -0.14558 0.09419 0.22782 0.04897 -0.18101 \n", "A1CF -0.05427 0.00154 -0.08773 -0.04492 -0.00743 \n", "A2M -0.29231 -0.14008 0.01138 -0.24503 -0.57375 \n", "A2ML1 0.27110 0.31217 0.21735 0.26602 0.21648 \n", "\n", " GSM1899438 GSM1899439 GSM1899440 GSM1899441 GSM1899442 ... \\\n", "Gene ... \n", "A1BG -0.04018 0.22088 0.04537 0.16108 0.12155 ... \n", "A1BG-AS1 -0.02011 0.18320 0.24031 0.11782 -0.02379 ... \n", "A1CF -0.04886 -0.00683 -0.02361 -0.10952 0.09634 ... \n", "A2M -0.18806 -0.18277 -0.17873 0.29688 -0.48783 ... \n", "A2ML1 0.31937 0.11851 0.17474 0.40601 0.24820 ... \n", "\n", " GSM1899530 GSM1899531 GSM1899532 GSM1899533 GSM1899534 \\\n", "Gene \n", "A1BG 0.40302 0.32117 -0.36939 -0.48676 -0.35284 \n", "A1BG-AS1 0.13220 0.27727 -0.05263 -0.19545 -0.13791 \n", "A1CF -0.01513 -0.01443 -0.09942 1.45167 -0.02172 \n", "A2M -0.40910 -0.01202 -0.44170 -0.28565 -0.34341 \n", "A2ML1 0.30028 0.31647 0.18314 0.12257 0.14766 \n", "\n", " GSM1899535 GSM1899536 GSM1899537 GSM1899538 GSM1899539 \n", "Gene \n", "A1BG -0.52938 -0.16470 0.14649 0.73986 0.09743 \n", "A1BG-AS1 -0.21936 -0.05209 0.13885 0.33579 0.12575 \n", "A1CF -0.10273 -0.06247 -0.02348 0.02711 0.03525 \n", "A2M -0.28140 -0.28411 -0.57818 -0.25053 -0.30416 \n", "A2ML1 0.12236 0.26223 0.25747 0.26194 0.21353 \n", "\n", "[5 rows x 107 columns]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Normalized gene data saved to ../../output/preprocess/Endometrioid_Cancer/gene_data/GSE73614.csv\n", "Trait row is None. Cannot extract trait information from clinical data.\n", "Abnormality detected in the cohort: GSE73614. Preprocessing failed.\n", "Data was determined to be unusable due to missing trait indicators and was not saved\n" ] } ], "source": [ "# 1. 
Normalize gene symbols in the gene expression data\n", "normalized_gene_data = normalize_gene_symbols_in_index(gene_data)\n", "print(f\"Normalized gene data shape: {normalized_gene_data.shape}\")\n", "print(\"First few genes with their expression values after normalization:\")\n", "print(normalized_gene_data.head())\n", "\n", "# Save the normalized gene data\n", "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", "normalized_gene_data.to_csv(out_gene_data_file)\n", "print(f\"Normalized gene data saved to {out_gene_data_file}\")\n", "\n", "# 2. Check if trait data is available before proceeding with clinical data extraction\n", "if trait_row is None:\n", "    print(\"Trait row is None. Cannot extract trait information from clinical data.\")\n", "    # Create an empty dataframe for clinical features\n", "    clinical_features = pd.DataFrame()\n", "    \n", "    # Create an empty dataframe for linked data\n", "    linked_data = pd.DataFrame()\n", "    \n", "    # Validate and save cohort info\n", "    validate_and_save_cohort_info(\n", "        is_final=True, \n", "        cohort=cohort, \n", "        info_path=json_path, \n", "        is_gene_available=True, \n", "        is_trait_available=False,  # Trait data is not available\n", "        is_biased=True,  # Not applicable but required\n", "        df=pd.DataFrame(),  # Empty dataframe\n", "        note=\"Dataset contains gene expression data but lacks clear trait indicators for Endometrioid Cancer status.\"\n", "    )\n", "    print(\"Data was determined to be unusable due to missing trait indicators and was not saved\")\n", "else:\n", "    try:\n", "        # Get the file paths for the matrix file to extract clinical data\n", "        _, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "        \n", "        # Get raw clinical data from the matrix file\n", "        _, clinical_raw = get_background_and_clinical_data(matrix_file)\n", "        \n", "        # Verify clinical data structure\n", "        print(\"Raw clinical data shape:\", clinical_raw.shape)\n", "        \n", "        # Extract clinical features using the defined conversion functions\n", "        clinical_features = geo_select_clinical_features(\n", "            clinical_df=clinical_raw,\n", "            trait=trait,\n", "            trait_row=trait_row,\n", "            convert_trait=convert_trait,\n", "            age_row=age_row,\n", "            convert_age=convert_age,\n", "            gender_row=gender_row,\n", "            convert_gender=convert_gender\n", "        )\n", "        \n", "        print(\"Clinical features:\")\n", "        print(clinical_features)\n", "        \n", "        # Save clinical features to file\n", "        os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n", "        clinical_features.to_csv(out_clinical_data_file)\n", "        print(f\"Clinical features saved to {out_clinical_data_file}\")\n", "        \n", "        # 3. Link clinical and genetic data\n", "        linked_data = geo_link_clinical_genetic_data(clinical_features, normalized_gene_data)\n", "        print(f\"Linked data shape: {linked_data.shape}\")\n", "        print(\"Linked data preview (first 5 rows, first 5 columns):\")\n", "        print(linked_data.iloc[:5, :5])\n", "        \n", "        # 4. 
Handle missing values\n", "        print(\"Missing values before handling:\")\n", "        print(f\"  Trait ({trait}) missing: {linked_data[trait].isna().sum()} out of {len(linked_data)}\")\n", "        if 'Age' in linked_data.columns:\n", "            print(f\"  Age missing: {linked_data['Age'].isna().sum()} out of {len(linked_data)}\")\n", "        if 'Gender' in linked_data.columns:\n", "            print(f\"  Gender missing: {linked_data['Gender'].isna().sum()} out of {len(linked_data)}\")\n", "        \n", "        gene_cols = [col for col in linked_data.columns if col not in [trait, 'Age', 'Gender']]\n", "        print(f\"  Genes with >20% missing: {sum(linked_data[gene_cols].isna().mean() > 0.2)}\")\n", "        print(f\"  Samples with >5% missing genes: {sum(linked_data[gene_cols].isna().mean(axis=1) > 0.05)}\")\n", "        \n", "        cleaned_data = handle_missing_values(linked_data, trait)\n", "        print(f\"Data shape after handling missing values: {cleaned_data.shape}\")\n", "        \n", "        # 5. Evaluate bias in trait and demographic features\n", "        is_trait_biased = False\n", "        if len(cleaned_data) > 0:\n", "            trait_biased, cleaned_data = judge_and_remove_biased_features(cleaned_data, trait)\n", "            is_trait_biased = trait_biased\n", "        else:\n", "            print(\"No data remains after handling missing values.\")\n", "            is_trait_biased = True\n", "        \n", "        # 6. Final validation and save\n", "        is_usable = validate_and_save_cohort_info(\n", "            is_final=True, \n", "            cohort=cohort, \n", "            info_path=json_path, \n", "            is_gene_available=True, \n", "            is_trait_available=True, \n", "            is_biased=is_trait_biased, \n", "            df=cleaned_data,\n", "            note=\"Dataset contains gene expression data comparing high-grade endometrioid ovarian cancer with serous and clear cell ovarian cancer samples.\"\n", "        )\n", "        \n", "        # 7. Save if usable\n", "        if is_usable and len(cleaned_data) > 0:\n", "            os.makedirs(os.path.dirname(out_data_file), exist_ok=True)\n", "            cleaned_data.to_csv(out_data_file)\n", "            print(f\"Linked data saved to {out_data_file}\")\n", "        else:\n", "            print(\"Data was determined to be unusable or empty and was not saved\")\n", "        \n", "    except Exception as e:\n", "        print(f\"Error processing data: {e}\")\n", "        # Handle the error case by still recording cohort info\n", "        validate_and_save_cohort_info(\n", "            is_final=True, \n", "            cohort=cohort, \n", "            info_path=json_path, \n", "            is_gene_available=True, \n", "            is_trait_available=False,  # Mark as not available due to processing issues\n", "            is_biased=True, \n", "            df=pd.DataFrame(),  # Empty dataframe\n", "            note=f\"Error processing data: {str(e)}\"\n", "        )\n", "        print(\"Data was determined to be unusable and was not saved\")" ] } ], "metadata": { "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.16" } }, "nbformat": 4, "nbformat_minor": 5 }