# Path Configuration
from typing import Optional
from tools.preprocess import *
# Processing context
trait = "Atherosclerosis"
cohort = "GSE109048"
# Input paths
in_trait_dir = "../DATA/GEO/Atherosclerosis"
in_cohort_dir = "../DATA/GEO/Atherosclerosis/GSE109048"
# Output paths
out_data_file = "./output/preprocess/1/Atherosclerosis/GSE109048.csv"
out_gene_data_file = "./output/preprocess/1/Atherosclerosis/gene_data/GSE109048.csv"
out_clinical_data_file = "./output/preprocess/1/Atherosclerosis/clinical_data/GSE109048.csv"
json_path = "./output/preprocess/1/Atherosclerosis/cohort_info.json"
# STEP 1: Initial Data Loading
# 1. Identify the paths to the SOFT file and the matrix file
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# 2. Read the matrix file to obtain background information and sample characteristics data
background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(
    matrix_file,
    prefixes_a=background_prefixes,
    prefixes_b=clinical_prefixes
)
# 3. Obtain the sample characteristics dictionary from the clinical dataframe
sample_characteristics_dict = get_unique_values_by_row(clinical_data)
# 4. Explicitly print out all the background information and the sample characteristics dictionary
print("Background Information:")
print(background_info)
print("\nSample Characteristics Dictionary:")
print(sample_characteristics_dict)
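# For orientation only: the dictionary printed above maps each characteristics row to its
# unique values. A minimal sketch of how such a structure could be built, assuming
# clinical_data holds one characteristics field per row with samples as columns; the
# library's get_unique_values_by_row may differ in detail. (Defined here, not called.)
def _unique_values_by_row_sketch(df):
    """Map each row position to the sorted unique (non-null) values found in that row."""
    return {i: sorted(set(str(v) for v in row.dropna())) for i, (_, row) in enumerate(df.iterrows())}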
# 1. Gene Expression Data Availability
is_gene_available = True # Based on the background info indicating gene expression profiling (platelet mRNA).
# 2. Variable Availability and Data Type Conversion
# Examine sample characteristics: {0: ['tissue: Platelets'], 1: ['diagnosis: sCAD', 'diagnosis: healthy', 'diagnosis: STEMI']}
# Diagnosis info is in row 1. We interpret "sCAD" or "STEMI" as atherosclerosis (1) and "healthy" as control (0).
trait_row = 1
age_row = None
gender_row = None
# Define conversion functions
def convert_trait(value: str) -> Optional[int]:
    """
    Convert the diagnosis info (sCAD, STEMI, healthy) to a binary code:
    1 for atherosclerosis (sCAD or STEMI), 0 for healthy, None if unknown.
    """
    parts = value.split(':')
    if len(parts) < 2:
        return None
    val = parts[1].strip().lower()
    if val in ["scad", "stemi"]:
        return 1
    elif val == "healthy":
        return 0
    else:
        return None
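# Quick sanity check of convert_trait against the exact strings reported in the sample
# characteristics above (illustrative asserts only; they do not touch the dataset itself).
assert convert_trait("diagnosis: sCAD") == 1
assert convert_trait("diagnosis: STEMI") == 1
assert convert_trait("diagnosis: healthy") == 0
assert convert_trait("tissue: Platelets") is None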
def convert_age(value: str) -> Optional[float]:
    """
    No age data available, so we simply return None.
    """
    return None
def convert_gender(value: str) -> Optional[int]:
    """
    No gender data available, so we simply return None.
    """
    return None
# 3. Save Metadata (initial filtering)
is_trait_available = (trait_row is not None)
is_usable = validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)
# 4. Clinical Feature Extraction (only if trait data is available)
if trait_row is not None:
    # The 'clinical_data' DataFrame was loaded in STEP 1 above
    selected_clinical_df = geo_select_clinical_features(
        clinical_df=clinical_data,
        trait=trait,
        trait_row=trait_row,
        convert_trait=convert_trait,
        age_row=age_row,
        convert_age=convert_age,
        gender_row=gender_row,
        convert_gender=convert_gender
    )
    # Preview the extracted features
    preview_data = preview_df(selected_clinical_df)
    print("Preview of extracted clinical features:", preview_data)
    # Save the clinical data to CSV, keeping the index so the trait row label is preserved
    selected_clinical_df.to_csv(out_clinical_data_file)
# STEP 3: Gene Expression Data Extraction
# 1. Use the get_genetic_data function from the library to get the gene_data from the matrix_file previously defined.
gene_data = get_genetic_data(matrix_file)
# 2. Print the first 20 row IDs (gene or probe identifiers) for future observation.
print(gene_data.index[:20])
# Based on observation, these identifiers (e.g., '2824546_st') appear to be microarray probe IDs, not typical human gene symbols.
# Therefore, they require mapping to gene symbols.
print("They are microarray probe IDs and require further mapping to standard gene symbols.")
print("requires_gene_mapping = True")
# STEP 5: Gene Annotation
# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.
gene_annotation = get_gene_annotation(soft_file)
# 2. Use the 'preview_df' function from the library to preview the data and print out the results.
print("Gene annotation preview:")
print(preview_df(gene_annotation))
# STEP 6: Gene Identifier Mapping
# We have probe IDs in the gene expression data that look like "2824546_st",
# but the annotation has columns "ID" and "probeset_id" with values like "TC01000001.hg.1".
# The library function 'get_gene_mapping' expects the probe column to be named "ID",
# and will raise a KeyError if we pass in a different column name (e.g., "probeset_id").
# Here, we manually build the mapping DataFrame to avoid the KeyError.
# Define the columns in the annotation DataFrame that correspond to probe ID and gene info
prob_col = "probeset_id"
gene_col = "gene_assignment"
# 1. Manually build the mapping DataFrame to avoid the mismatch with the library function.
if prob_col not in gene_annotation.columns or gene_col not in gene_annotation.columns:
    print(f"Columns '{prob_col}' or '{gene_col}' not found in annotation. Skipping mapping.")
else:
    mapping_df = gene_annotation.loc[:, [prob_col, gene_col]].dropna().copy()
    # Rename to "ID" and "Gene" for downstream consistency
    mapping_df = mapping_df.rename(columns={prob_col: 'ID', gene_col: 'Gene'})
    mapping_df['ID'] = mapping_df['ID'].astype(str)
    # 2. Check overlap between annotation IDs and expression data index
    common_ids = set(mapping_df['ID']).intersection(set(gene_data.index))
    if not common_ids:
        print("No matching probe IDs found between gene_data and annotation. Skipping mapping.")
    else:
        # 3. Apply the mapping to convert probe-level data to gene-level data
        gene_data = apply_gene_mapping(gene_data, mapping_df)
        print("Mapped gene_data shape:", gene_data.shape)
        print(gene_data.head())
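# For reference only: a minimal, self-contained sketch of probe-to-gene aggregation on toy
# data. This is NOT the library's apply_gene_mapping; the real function's rules (e.g., how it
# parses multi-gene 'gene_assignment' strings) may differ. It only illustrates the collapse step.
import pandas as pd
_toy_expr = pd.DataFrame(
    {"sample1": [1.0, 2.0, 3.0], "sample2": [4.0, 5.0, 6.0]},
    index=["probe_a", "probe_b", "probe_c"],
)
_toy_map = pd.DataFrame({"ID": ["probe_a", "probe_b", "probe_c"], "Gene": ["GENE1", "GENE1", "GENE2"]})
_collapsed = (
    _toy_expr.join(_toy_map.set_index("ID"))   # attach a gene symbol to each probe row
    .groupby("Gene")                           # group probes mapping to the same symbol
    .mean()                                    # average their expression values
)
print("Toy probe-to-gene collapse:\n", _collapsed)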
import os
import pandas as pd
# STEP 7: Normalization, Linking, and Final Validation
# 1. Normalize the gene expression data to standard gene symbols.
normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
normalized_gene_data.to_csv(out_gene_data_file)
print("Normalized gene expression data saved to:", out_gene_data_file)
# Check whether the clinical data file exists before linking
if not os.path.exists(out_clinical_data_file):
    # Without clinical data, we cannot do trait-based analysis
    dummy_df = pd.DataFrame()
    trait_biased = True  # Mark as unusable because we lack trait information
    is_usable = validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=False,
        is_biased=trait_biased,
        df=dummy_df,
        note="No trait data found. This dataset is not usable for final analysis."
    )
    print("Clinical data file not found. Skipping linking and final data export.")
else:
    # 2. Link the clinical and genetic data
    # Read the clinical CSV with index_col=0 to preserve the feature name (trait row label)
    selected_clinical_df = pd.read_csv(out_clinical_data_file, header=0, index_col=0)
    # If there is exactly one row (our trait row), rename it to the trait name
    if selected_clinical_df.shape[0] == 1:
        selected_clinical_df.index = [trait]
    linked_data = geo_link_clinical_genetic_data(selected_clinical_df, normalized_gene_data)
    # 3. Handle missing values
    df = handle_missing_values(linked_data, trait)
    # 4. Determine whether the trait or demographic features are biased
    #    (an illustrative sketch of this kind of check follows after this block)
    trait_biased, df = judge_and_remove_biased_features(df, trait)
    # 5. Perform final validation
    is_usable = validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=True,
        is_biased=trait_biased,
        df=df,
        note="Final step with linking, missing-value handling, and bias checks."
    )
    # 6. If the data is usable, save the final linked data
    if is_usable:
        df.to_csv(out_data_file)
        print(f"Final linked data saved to: {out_data_file}")
    else:
        print("Dataset is not usable or severely biased. No final data saved.")