{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "d9076d8c", "metadata": {}, "outputs": [], "source": [ "import sys\n", "import os\n", "sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../..')))\n", "\n", "# Path Configuration\n", "from tools.preprocess import *\n", "\n", "# Processing context\n", "trait = \"Alopecia\"\n", "cohort = \"GSE81071\"\n", "\n", "# Input paths\n", "in_trait_dir = \"../../input/GEO/Alopecia\"\n", "in_cohort_dir = \"../../input/GEO/Alopecia/GSE81071\"\n", "\n", "# Output paths\n", "out_data_file = \"../../output/preprocess/Alopecia/GSE81071.csv\"\n", "out_gene_data_file = \"../../output/preprocess/Alopecia/gene_data/GSE81071.csv\"\n", "out_clinical_data_file = \"../../output/preprocess/Alopecia/clinical_data/GSE81071.csv\"\n", "json_path = \"../../output/preprocess/Alopecia/cohort_info.json\"\n" ] }, { "cell_type": "markdown", "id": "ceb11d68", "metadata": {}, "source": [ "### Step 1: Initial Data Loading" ] }, { "cell_type": "code", "execution_count": null, "id": "6cf428f9", "metadata": {}, "outputs": [], "source": [ "from tools.preprocess import *\n", "# 1. Identify the paths to the SOFT file and the matrix file\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "\n", "# 2. Read the matrix file to obtain background information and sample characteristics data\n", "background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']\n", "clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']\n", "background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)\n", "\n", "# 3. Obtain the sample characteristics dictionary from the clinical dataframe\n", "sample_characteristics_dict = get_unique_values_by_row(clinical_data)\n", "\n", "# 4. Explicitly print out all the background information and the sample characteristics dictionary\n", "print(\"Background Information:\")\n", "print(background_info)\n", "print(\"Sample Characteristics Dictionary:\")\n", "print(sample_characteristics_dict)\n" ] }, { "cell_type": "markdown", "id": "0f491e13", "metadata": {}, "source": [ "### Step 2: Dataset Analysis and Clinical Feature Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "21767180", "metadata": {}, "outputs": [], "source": [ "# 1. Gene Expression Availability Analysis\n", "# Based on background info, this is a gene expression dataset from skin biopsies\n", "is_gene_available = True\n", "\n", "# 2. 
Variable Availability and Data Type Conversion\n", "\n", "# 2.1 Identifying rows for trait, age, and gender\n", "\n", "# For trait (Alopecia):\n", "# Looking at sample characteristics, there is no explicit mention of alopecia\n", "# But the series title mentions \"discoid lesions (DLE) are often circular and frequently lead to alopecia\"\n", "# We can infer that DLE cases could be considered as potentially having alopecia\n", "trait_row = 1 # \"disease state\" in row 1 contains DLE which can be associated with alopecia\n", "\n", "# For age and gender:\n", "# Neither age nor gender information appears to be available in the sample characteristics\n", "age_row = None\n", "gender_row = None\n", "\n", "# 2.2 Data Type Conversion functions\n", "\n", "def convert_trait(value):\n", " \"\"\"\n", " Convert disease state values to binary for Alopecia trait\n", " DLE is associated with alopecia according to the background info\n", " \"\"\"\n", " if value is None:\n", " return None\n", " \n", " # Extract the value after the colon if present\n", " if \":\" in value:\n", " value = value.split(\":\", 1)[1].strip()\n", " \n", " # Based on background info, DLE is associated with alopecia\n", " if value.lower() == \"dle\":\n", " return 1 # Positive for alopecia risk/condition\n", " elif value.lower() in [\"healthy\", \"normal\", \"scle\"]:\n", " return 0 # Not associated with alopecia\n", " else:\n", " return None\n", "\n", "def convert_age(value):\n", " \"\"\"Placeholder for age conversion - not used since age data is not available\"\"\"\n", " return None\n", "\n", "def convert_gender(value):\n", " \"\"\"Placeholder for gender conversion - not used since gender data is not available\"\"\"\n", " return None\n", "\n", "# 3. Save metadata\n", "# Check if trait data is available (trait_row is not None)\n", "is_trait_available = trait_row is not None\n", "\n", "# Save initial validation information\n", "validate_and_save_cohort_info(\n", " is_final=False, \n", " cohort=cohort, \n", " info_path=json_path, \n", " is_gene_available=is_gene_available, \n", " is_trait_available=is_trait_available\n", ")\n", "\n", "# 4. 
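Sanity check: preview how convert_trait maps the disease-state values observed above\n",
"# (Assumption: the characteristic strings follow the 'disease state: X' pattern shown in the Step 1 output)\n",
"for example_value in ['disease state: DLE', 'disease state: sCLE', 'disease state: healthy']:\n",
"    print(example_value, '->', convert_trait(example_value))\n",
"\n",
"# 5. 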
Clinical Feature Extraction\n",
"# Only execute if trait_row is not None\n",
"if trait_row is not None:\n",
"    # Use the clinical data obtained in Step 1 (characteristic rows x samples)\n",
"    # rather than rebuilding it from the unique-values dictionary, so that\n",
"    # per-sample values are extracted correctly\n",
"    clinical_df = geo_select_clinical_features(\n",
"        clinical_df=clinical_data,\n",
"        trait=trait,\n",
"        trait_row=trait_row,\n",
"        convert_trait=convert_trait,\n",
"        age_row=age_row,\n",
"        convert_age=convert_age,\n",
"        gender_row=gender_row,\n",
"        convert_gender=convert_gender\n",
"    )\n",
"\n",
"    # Preview the processed clinical data\n",
"    print(\"Preview of processed clinical data:\")\n",
"    print(preview_df(clinical_df))\n",
"\n",
"    # Save the clinical data\n",
"    os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)\n",
"    clinical_df.to_csv(out_clinical_data_file)\n",
"    print(f\"Clinical data saved to {out_clinical_data_file}\")\n" ] }, { "cell_type": "markdown", "id": "2d1c9cab", "metadata": {}, "source": [ "### Step 3: Gene Data Extraction" ] }, { "cell_type": "code", "execution_count": null, "id": "853431e6", "metadata": {}, "outputs": [], "source": [ "# 1. First get the file paths again to access the matrix file\n",
"soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
"\n",
"# 2. Use the get_genetic_data function from the library to get the gene_data from the matrix_file\n",
"gene_data = get_genetic_data(matrix_file)\n",
"\n",
"# 3. Print the first 20 row IDs (gene or probe identifiers) for later inspection\n",
"print(\"First 20 gene/probe identifiers:\")\n",
"print(gene_data.index[:20])\n" ] }, { "cell_type": "markdown", "id": "36c762f7", "metadata": {}, "source": [ "### Step 4: Gene Identifier Review" ] }, { "cell_type": "code", "execution_count": null, "id": "8b0919f3", "metadata": {}, "outputs": [], "source": [ "# These identifiers appear to be probe IDs from a microarray platform rather than standard human gene symbols.\n",
"# They follow a pattern of numbers followed by \"_at\", which is typical of microarray probe IDs\n",
"# (like Affymetrix arrays).\n",
"# They need to be mapped to gene symbols for standardized analysis.\n",
"\n",
"requires_gene_mapping = True\n" ] }, { "cell_type": "markdown", "id": "0c678032", "metadata": {}, "source": [ "### Step 5: Gene Annotation" ] }, { "cell_type": "code", "execution_count": null, "id": "ec80ab35", "metadata": {}, "outputs": [], "source": [ "# 1. First get the file paths using the geo_get_relevant_filepaths function\n",
"soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
"\n",
"# 2. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.\n",
"gene_annotation = get_gene_annotation(soft_file)\n",
"\n",
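"# 3. (Optional) List the annotation columns to see which fields hold probe IDs and gene identifiers\n",
"# (Assumption: get_gene_annotation returns a pandas DataFrame, as it is indexed by column name in later steps)\n",
"print(\"Annotation columns:\", list(gene_annotation.columns))\n",
"\n",
"# 4. 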
Use the 'preview_df' function from the library to preview the annotation data and print out the results.\n",
"print(\"Gene annotation preview:\")\n",
"print(preview_df(gene_annotation))\n" ] }, { "cell_type": "markdown", "id": "d4b096d8", "metadata": {}, "source": [ "### Step 6: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": null, "id": "9a7eaa21", "metadata": {}, "outputs": [], "source": [ "# Correct the gene identifier mapping to address the format mismatch between expression probe IDs and annotation IDs\n",
"\n",
"# 1. First examine the structure of the SOFT file more thoroughly to find gene symbols\n",
"# Re-read the SOFT file to look for gene symbol information\n",
"soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n",
"\n",
"# Read a portion of the SOFT file to understand its structure better\n",
"import gzip\n",
"from itertools import islice\n",
"with gzip.open(soft_file, 'rt') as f:\n",
"    # Read up to the first 100 lines to identify the structure\n",
"    lines = [line.strip() for line in islice(f, 100)]\n",
"\n",
"# Look for lines that might contain gene symbol information\n",
"gene_symbol_lines = [line for line in lines if \"symbol\" in line.lower()]\n",
"print(\"Sample lines containing gene symbol information:\")\n",
"for i, line in enumerate(gene_symbol_lines[:5]):\n",
"    print(f\"{i}: {line}\")\n",
"\n",
"# Examine the structure of the gene expression data more closely\n",
"print(\"\\nStructure of gene expression data:\")\n",
"print(f\"Gene data shape: {gene_data.shape}\")\n",
"print(f\"Gene data columns (first 5): {list(gene_data.columns)[:5]}\")\n",
"print(f\"Gene data index format (first 5): {list(gene_data.index[:5])}\")\n",
"\n",
"# Try a different approach - use platform information from the SOFT file\n",
"# Read platform information to find probe-to-gene mapping\n",
"with gzip.open(soft_file, 'rt') as f:\n",
"    platform_id = None\n",
"    for line in f:\n",
"        if line.startswith('^PLATFORM'):\n",
"            platform_id = line.strip().split('=', 1)[1].strip()\n",
"            break\n",
"\n",
"print(f\"\\nPlatform ID: {platform_id}\")\n",
"\n",
"# Instead of relying on the limited annotation, try to extract gene symbols from the SOFT file\n",
"# Read the platform table to find probe-to-gene-symbol mappings\n",
"probe_gene_dict = {}\n",
"gene_symbol_column = None\n",
"probe_id_column = None\n",
"\n",
"with gzip.open(soft_file, 'rt') as f:\n",
"    for line in f:\n",
"        if line.startswith('!platform_table_begin'):\n",
"            # Read the header line to find relevant columns\n",
"            header_line = next(f).strip()\n",
"            headers = header_line.split('\\t')\n",
"\n",
"            # Find columns for probe ID and gene symbol\n",
"            for i, header in enumerate(headers):\n",
"                if header.lower() in ['id', 'id_ref', 'probe_id', 'probeid']:\n",
"                    probe_id_column = i\n",
"                if header.lower() in ['gene_symbol', 'symbol', 'genesymbol']:\n",
"                    gene_symbol_column = i\n",
"\n",
"            if probe_id_column is not None and gene_symbol_column is not None:\n",
"                print(f\"Found probe ID column ({headers[probe_id_column]}) and gene symbol column ({headers[gene_symbol_column]})\")\n",
"                # Read the mapping\n",
"                for data_line in f:\n",
"                    if data_line.startswith('!platform_table_end'):\n",
"                        break\n",
"                    fields = data_line.strip().split('\\t')\n",
"                    if len(fields) > max(probe_id_column, gene_symbol_column):\n",
"                        probe_id = fields[probe_id_column]\n",
"                        gene_symbol = fields[gene_symbol_column]\n",
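"                        # Some GEO platform tables list several symbols per probe separated by ' /// ';\n",
"                        # keep only the first symbol so each probe maps to a single gene\n",
"                        # (assumption: this separator convention applies to this platform)\n",
"                        gene_symbol = gene_symbol.split('///')[0].strip()\n",
"                        if gene_symbol:  # Only add if gene symbol is not 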
empty\n", " probe_gene_dict[probe_id] = gene_symbol\n", " break\n", "\n", "# If we found mappings, create a mapping DataFrame\n", "if probe_gene_dict:\n", " print(f\"\\nFound {len(probe_gene_dict)} probe-to-gene mappings\")\n", " # Create mapping DataFrame\n", " mapping_data = pd.DataFrame({\n", " 'ID': list(probe_gene_dict.keys()),\n", " 'Gene': list(probe_gene_dict.values())\n", " })\n", " print(\"Mapping dataframe preview:\")\n", " print(preview_df(mapping_data))\n", " \n", " # Apply gene mapping with the new mapping dataframe\n", " try:\n", " gene_data = apply_gene_mapping(gene_data, mapping_data)\n", " print(\"\\nGene expression data after mapping:\")\n", " print(f\"Shape: {gene_data.shape}\")\n", " print(preview_df(gene_data))\n", " \n", " # Normalize gene symbols\n", " gene_data = normalize_gene_symbols_in_index(gene_data)\n", " print(\"\\nGene expression data after normalization:\")\n", " print(f\"Shape: {gene_data.shape}\")\n", " print(preview_df(gene_data))\n", " \n", " # Save the gene data\n", " os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", " gene_data.to_csv(out_gene_data_file)\n", " print(f\"\\nGene data saved to {out_gene_data_file}\")\n", " except Exception as e:\n", " print(f\"Error applying gene mapping: {e}\")\n", "else:\n", " # If we couldn't find the mapping in the SOFT file, try a fallback approach\n", " print(\"\\nCouldn't find proper gene symbol mapping in the SOFT file.\")\n", " print(\"Using Entrez Gene IDs instead...\")\n", " \n", " # Try to fix the format mismatch between gene_data index and gene_annotation ID\n", " # Create a mapping between probe IDs in gene_data and gene annotation\n", " gene_data_ids = set(gene_data.index)\n", " annotation_ids = set(gene_annotation['ID'])\n", " \n", " # Check for any exact matches\n", " common_ids = gene_data_ids.intersection(annotation_ids)\n", " print(f\"Number of exact ID matches: {len(common_ids)}\")\n", " \n", " # If few exact matches, try to match by removing suffixes\n", " if len(common_ids) < 100:\n", " print(\"Trying to match IDs by removing suffixes...\")\n", " # Create a mapping that ignores suffixes like '_at'\n", " cleaned_gene_data_ids = {id.split('_')[0]: id for id in gene_data_ids}\n", " cleaned_annotation_ids = {id.split('_')[0]: id for id in annotation_ids}\n", " \n", " # Find common base IDs\n", " common_base_ids = set(cleaned_gene_data_ids.keys()).intersection(set(cleaned_annotation_ids.keys()))\n", " print(f\"Number of matches after removing suffixes: {len(common_base_ids)}\")\n", " \n", " # Create a mapping from gene_data IDs to annotation IDs\n", " id_mapping = {cleaned_gene_data_ids[base_id]: cleaned_annotation_ids[base_id] \n", " for base_id in common_base_ids if base_id in cleaned_gene_data_ids and base_id in cleaned_annotation_ids}\n", " \n", " if id_mapping:\n", " # Create a new mapping dataframe based on this ID mapping\n", " mapping_rows = []\n", " for gene_data_id, annotation_id in id_mapping.items():\n", " gene_symbol = gene_annotation.loc[gene_annotation['ID'] == annotation_id, 'ENTREZ_GENE_ID'].values\n", " if len(gene_symbol) > 0:\n", " mapping_rows.append({'ID': gene_data_id, 'Gene': gene_symbol[0]})\n", " \n", " if mapping_rows:\n", " mapping_df = pd.DataFrame(mapping_rows)\n", " print(\"\\nCreated mapping dataframe with fixed ID format:\")\n", " print(preview_df(mapping_df))\n", " \n", " # Apply gene mapping with the fixed mapping dataframe\n", " gene_data = apply_gene_mapping(gene_data, mapping_df)\n", " print(\"\\nGene expression data after mapping:\")\n", " 
print(f\"Shape: {gene_data.shape}\")\n", " print(preview_df(gene_data))\n", " \n", " # Normalize gene symbols\n", " gene_data = normalize_gene_symbols_in_index(gene_data)\n", " print(\"\\nGene expression data after normalization:\")\n", " print(f\"Shape: {gene_data.shape}\")\n", " print(preview_df(gene_data))\n", " \n", " # Save the gene data\n", " os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", " gene_data.\n" ] }, { "cell_type": "markdown", "id": "af0aa29a", "metadata": {}, "source": [ "### Step 7: Gene Identifier Mapping" ] }, { "cell_type": "code", "execution_count": null, "id": "a1b1f1bc", "metadata": {}, "outputs": [], "source": [ "# 1. Reload necessary data\n", "soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)\n", "gene_data = get_genetic_data(matrix_file)\n", "gene_annotation = get_gene_annotation(soft_file)\n", "\n", "# 2. Analyze gene identifiers in gene expression data and gene annotation data\n", "print(\"Gene expression data - first 10 probe IDs:\")\n", "print(list(gene_data.index[:10]))\n", "print(\"Gene annotation data - first 10 probe IDs:\")\n", "print(list(gene_annotation['ID'][:10]))\n", "\n", "# Check if there's any direct overlap between the two sets of IDs\n", "gene_data_ids = set(gene_data.index)\n", "annotation_ids = set(gene_annotation['ID'])\n", "common_ids = gene_data_ids.intersection(annotation_ids)\n", "print(f\"Number of directly matching IDs: {len(common_ids)}\")\n", "\n", "# Try to extract the platform information from the SOFT file\n", "platform_info = {}\n", "with gzip.open(soft_file, 'rt') as f:\n", " for line in f:\n", " line = line.strip()\n", " if line.startswith(\"!Platform_title\"):\n", " platform_info['title'] = line.split(\"=\", 1)[1].strip().strip('\"')\n", " elif line.startswith(\"!Platform_geo_accession\"):\n", " platform_info['accession'] = line.split(\"=\", 1)[1].strip().strip('\"')\n", "\n", "print(\"Platform information:\")\n", "print(platform_info)\n", "\n", "# Create a mapping by cleaning probe IDs\n", "def clean_probe_id(probe_id):\n", " # Remove common suffixes\n", " for suffix in ['_at', '_st', '_a_at', '_s_at', '_x_at']:\n", " if probe_id.endswith(suffix):\n", " return probe_id[:-len(suffix)]\n", " return probe_id\n", "\n", "# Clean and map the IDs\n", "cleaned_gene_data_ids = {clean_probe_id(id): id for id in gene_data_ids}\n", "cleaned_annotation_ids = {clean_probe_id(id): id for id in annotation_ids}\n", "\n", "# Find potential matches based on cleaned IDs\n", "potential_matches = {}\n", "for clean_id, orig_id in cleaned_gene_data_ids.items():\n", " if clean_id in cleaned_annotation_ids:\n", " potential_matches[orig_id] = cleaned_annotation_ids[clean_id]\n", "\n", "print(f\"Found {len(potential_matches)} potential matches after cleaning IDs\")\n", "\n", "# Try numeric matching if needed\n", "if len(potential_matches) < 100:\n", " def extract_numeric(probe_id):\n", " import re\n", " match = re.search(r'(\\d+)', probe_id)\n", " if match:\n", " return match.group(1)\n", " return None\n", "\n", " numeric_gene_data_ids = {extract_numeric(id): id for id in gene_data_ids if extract_numeric(id)}\n", " numeric_annotation_ids = {extract_numeric(id): id for id in annotation_ids if extract_numeric(id)}\n", " \n", " numeric_matches = {}\n", " for num_id, orig_id in numeric_gene_data_ids.items():\n", " if num_id in numeric_annotation_ids:\n", " numeric_matches[orig_id] = numeric_annotation_ids[num_id]\n", " \n", " print(f\"Found {len(numeric_matches)} matches based on numeric part\")\n", " \n", " if 
len(numeric_matches) > len(potential_matches):\n", " potential_matches = numeric_matches\n", "\n", "# Create a mapping dataframe\n", "if potential_matches:\n", " mapping_rows = []\n", " for gene_data_id, annotation_id in potential_matches.items():\n", " gene_symbols = gene_annotation.loc[gene_annotation['ID'] == annotation_id, 'ENTREZ_GENE_ID']\n", " if not gene_symbols.empty:\n", " mapping_rows.append({'ID': gene_data_id, 'Gene': gene_symbols.iloc[0]})\n", " \n", " mapping_df = pd.DataFrame(mapping_rows)\n", " print(\"Created custom mapping dataframe. Preview:\")\n", " print(preview_df(mapping_df))\n", "else:\n", " # Fallback to original mapping\n", " mapping_df = get_gene_mapping(gene_annotation, 'ID', 'ENTREZ_GENE_ID')\n", " print(\"Using original mapping dataframe. Preview:\")\n", " print(preview_df(mapping_df))\n", "\n", "# 3. Apply the gene mapping to convert probe-level measurements to gene expression data\n", "try:\n", " gene_data_mapped = apply_gene_mapping(gene_data, mapping_df)\n", " print(\"Gene mapping applied. New gene data shape:\", gene_data_mapped.shape)\n", " print(\"Gene data preview after mapping:\")\n", " print(preview_df(gene_data_mapped))\n", " \n", " # If mapping produced results, use it\n", " if gene_data_mapped.shape[0] > 0:\n", " gene_data = gene_data_mapped\n", " else:\n", " # Use a direct approach if mapping failed\n", " print(\"Mapping resulted in empty dataframe. Using a different approach...\")\n", " simple_mapping = pd.DataFrame({\n", " 'ID': gene_data.index,\n", " 'Gene': [str(idx).split('_')[0] for idx in gene_data.index]\n", " })\n", " gene_data = apply_gene_mapping(gene_data, simple_mapping)\n", " print(\"Alternative mapping applied. New gene data shape:\", gene_data.shape)\n", "except Exception as e:\n", " print(f\"Error during gene mapping: {e}\")\n", " # Fallback to a simpler approach\n", " simple_mapping = pd.DataFrame({\n", " 'ID': gene_data.index,\n", " 'Gene': [str(idx).split('_')[0] for idx in gene_data.index]\n", " })\n", " gene_data = apply_gene_mapping(gene_data, simple_mapping)\n", " print(\"Fallback mapping applied. New gene data shape:\", gene_data.shape)\n", "\n", "# 4. Normalize gene symbols to ensure consistency\n", "gene_data = normalize_gene_symbols_in_index(gene_data)\n", "print(\"Gene symbols normalized. Final gene data shape:\", gene_data.shape)\n", "print(\"Gene data preview after normalization:\")\n", "print(preview_df(gene_data))\n", "\n", "# 5. Save the processed gene data to a file\n", "os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)\n", "gene_data.to_csv(out_gene_data_file)\n", "print(f\"Gene data saved to {out_gene_data_file}\")" ] } ], "metadata": {}, "nbformat": 4, "nbformat_minor": 5 }