import os
import pandas as pd
import torch
from sentence_transformers import SentenceTransformer, util
import faiss
import numpy as np
import pickle
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import scipy.special
from sklearn.feature_extraction.text import TfidfVectorizer
from flask import Flask, request, jsonify
import logging
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Disable tokenizers parallelism to avoid fork-related deadlocks
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Paths for saving artifacts
MODEL_DIR = "./saved_models"
FALLBACK_MODEL_DIR = "/tmp/saved_models"
try:
    os.makedirs(MODEL_DIR, exist_ok=True)
    logger.info(f"Using model directory: {MODEL_DIR}")
    chosen_model_dir = MODEL_DIR
except Exception as e:
    logger.warning(f"Failed to create {MODEL_DIR}: {e}. Using fallback directory.")
    os.makedirs(FALLBACK_MODEL_DIR, exist_ok=True)
    chosen_model_dir = FALLBACK_MODEL_DIR
# Update paths
UNIVERSAL_MODEL_PATH = os.path.join(chosen_model_dir, "universal_model")
DETECTOR_MODEL_PATH = os.path.join(chosen_model_dir, "detector_model")
TFIDF_PATH = os.path.join(chosen_model_dir, "tfidf_vectorizer.pkl")
SKILL_TFIDF_PATH = os.path.join(chosen_model_dir, "skill_tfidf.pkl")
QUESTION_ANSWER_PATH = os.path.join(chosen_model_dir, "question_to_answer.pkl")
FAISS_INDEX_PATH = os.path.join(chosen_model_dir, "faiss_index.index")
ANSWER_EMBEDDINGS_PATH = os.path.join(chosen_model_dir, "answer_embeddings.pkl")
COURSE_SIMILARITY_PATH = os.path.join(chosen_model_dir, "course_similarity.pkl")
JOB_SIMILARITY_PATH = os.path.join(chosen_model_dir, "job_similarity.pkl")
# Global variables for precomputed data
tfidf_vectorizer = None
skill_tfidf = None
question_to_answer = None
faiss_index = None
answer_embeddings = None
course_similarity = None
job_similarity = None
# Improved dataset loading with fallback
def load_dataset(file_path, required_columns=[], additional_columns=['popularity', 'completion_rate'], fallback_data=None):
    try:
        df = pd.read_csv(file_path)
        missing_required = [col for col in required_columns if col not in df.columns]
        missing_additional = [col for col in additional_columns if col not in df.columns]
        # Handle missing required columns
        if missing_required:
            logger.warning(f"Required columns {missing_required} missing in {file_path}. Adding empty values.")
            for col in missing_required:
                df[col] = ""
        # Handle missing additional columns (popularity, completion_rate, etc.)
        if missing_additional:
            logger.warning(f"Additional columns {missing_additional} missing in {file_path}. Adding default values.")
            for col in missing_additional:
                if col == 'popularity':
                    df[col] = 0.8  # Default value for popularity
                elif col == 'completion_rate':
                    df[col] = 0.7  # Default value for completion_rate
                else:
                    df[col] = 0.0  # Default for other additional columns
        # Ensure 'level' column has valid, non-empty string values
        if 'level' in df.columns:
            df['level'] = df['level'].apply(lambda x: 'Intermediate' if pd.isna(x) or str(x).strip() == "" else x)
        else:
            logger.warning(f"'level' column missing in {file_path}. Adding default 'Intermediate'.")
            df['level'] = 'Intermediate'
        return df
    except ValueError as ve:
        logger.error(f"ValueError loading {file_path}: {ve}. Using fallback data.")
        if fallback_data is not None:
            logger.info(f"Using fallback data for {file_path}")
            return pd.DataFrame(fallback_data)
        return None
    except Exception as e:
        logger.error(f"Error loading {file_path}: {e}. Using fallback data.")
        if fallback_data is not None:
            logger.info(f"Using fallback data for {file_path}")
            return pd.DataFrame(fallback_data)
        return None
# Load datasets with fallbacks
questions_df = load_dataset("Generated_Skill-Based_Questions.csv", ["Skill", "Question", "Answer"], [], {
    'Skill': ['Linux', 'Git', 'Node.js', 'Python', 'Kubernetes'],
    'Question': ['Advanced Linux question', 'Advanced Git question', 'Basic Node.js question',
                 'Intermediate Python question', 'Basic Kubernetes question'],
    'Answer': ['Linux answer', 'Git answer', 'Node.js answer', 'Python answer', 'Kubernetes answer']
})
courses_df = load_dataset("coursera_course_dataset_v2_no_null.csv", ["skills", "course_title", "Organization", "level"], ['popularity', 'completion_rate'], {
    'skills': ['Linux', 'Git', 'Node.js', 'Python', 'Kubernetes'],
    'course_title': ['Linux Admin', 'Git Mastery', 'Node.js Advanced', 'Python for Data', 'Kubernetes Basics'],
    'Organization': ['Coursera', 'Udemy', 'Pluralsight', 'edX', 'Linux Foundation'],
    'level': ['Intermediate', 'Intermediate', 'Advanced', 'Advanced', 'Intermediate'],
    'popularity': [0.85, 0.9, 0.8, 0.95, 0.9],
    'completion_rate': [0.65, 0.7, 0.6, 0.8, 0.75]
})
jobs_df = load_dataset("Updated_Job_Posting_Dataset.csv", ["job_title", "company_name", "location", "required_skills", "job_description"], [], {
    'job_title': ['DevOps Engineer', 'Cloud Architect', 'Software Engineer', 'Data Scientist', 'Security Analyst'],
    'company_name': ['Tech Corp', 'Cloud Inc', 'Tech Solutions', 'Data Co', 'SecuriTech'],
    'location': ['Remote', 'Islamabad', 'Karachi', 'Remote', 'Islamabad'],
    'required_skills': ['Linux, Kubernetes', 'AWS, Kubernetes', 'Python, Node.js', 'Python, SQL', 'Cybersecurity, Linux'],
    'job_description': ['DevOps role description', 'Cloud architecture position', 'Software engineering role', 'Data science position', 'Security analyst role'],
    'level': ['Intermediate', 'Advanced', 'Intermediate', 'Intermediate', 'Intermediate']
})
# Validate questions_df
if questions_df is None or questions_df.empty:
    logger.error("questions_df is empty or could not be loaded. Exiting.")
    exit(1)
if not all(col in questions_df.columns for col in ["Skill", "Question", "Answer"]):
    logger.error("questions_df is missing required columns. Exiting.")
    exit(1)
logger.info(f"questions_df loaded with {len(questions_df)} rows. Skills available: {list(questions_df['Skill'].unique())}")
# Load or Initialize Models with Fallback
def load_universal_model():
    default_model = "all-MiniLM-L6-v2"
    try:
        if os.path.exists(UNIVERSAL_MODEL_PATH):
            logger.info(f"Loading universal model from {UNIVERSAL_MODEL_PATH}")
            return SentenceTransformer(UNIVERSAL_MODEL_PATH)
        else:
            logger.info(f"Loading universal model: {default_model}")
            model = SentenceTransformer(default_model)
            model.save(UNIVERSAL_MODEL_PATH)
            return model
    except Exception as e:
        logger.error(f"Failed to load universal model {default_model}: {e}. Exiting.")
        exit(1)
universal_model = load_universal_model()
if os.path.exists(DETECTOR_MODEL_PATH):
    detector_tokenizer = AutoTokenizer.from_pretrained(DETECTOR_MODEL_PATH)
    detector_model = AutoModelForSequenceClassification.from_pretrained(DETECTOR_MODEL_PATH)
else:
    detector_tokenizer = AutoTokenizer.from_pretrained("roberta-base-openai-detector")
    detector_model = AutoModelForSequenceClassification.from_pretrained("roberta-base-openai-detector")
# Load Precomputed Resources
def load_precomputed_resources():
    global tfidf_vectorizer, skill_tfidf, question_to_answer, faiss_index, answer_embeddings, course_similarity, job_similarity
    if all(os.path.exists(p) for p in [TFIDF_PATH, SKILL_TFIDF_PATH, QUESTION_ANSWER_PATH, FAISS_INDEX_PATH, ANSWER_EMBEDDINGS_PATH, COURSE_SIMILARITY_PATH, JOB_SIMILARITY_PATH]):
        try:
            with open(TFIDF_PATH, 'rb') as f: tfidf_vectorizer = pickle.load(f)
            with open(SKILL_TFIDF_PATH, 'rb') as f: skill_tfidf = pickle.load(f)
            with open(QUESTION_ANSWER_PATH, 'rb') as f: question_to_answer = pickle.load(f)
            faiss_index = faiss.read_index(FAISS_INDEX_PATH)
            with open(ANSWER_EMBEDDINGS_PATH, 'rb') as f: answer_embeddings = pickle.load(f)
            with open(COURSE_SIMILARITY_PATH, 'rb') as f: course_similarity = pickle.load(f)
            with open(JOB_SIMILARITY_PATH, 'rb') as f: job_similarity = pickle.load(f)
            logger.info("Loaded precomputed resources successfully")
        except Exception as e:
            logger.error(f"Error loading precomputed resources: {e}")
            precompute_resources()
    else:
        precompute_resources()
# Precompute Resources Offline (to be run separately)
def precompute_resources():
    global tfidf_vectorizer, skill_tfidf, question_to_answer, faiss_index, answer_embeddings, course_similarity, job_similarity
    logger.info("Precomputing resources offline")
    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        tfidf_vectorizer = TfidfVectorizer(stop_words='english')
        all_texts = questions_df['Answer'].tolist() + questions_df['Question'].tolist()
        tfidf_vectorizer.fit(all_texts)
        skill_tfidf = {skill.lower(): tfidf_vectorizer.transform([skill]).toarray()[0] for skill in questions_df['Skill'].unique()}
        question_to_answer = dict(zip(questions_df['Question'], questions_df['Answer']))
        answer_embeddings = universal_model.encode(questions_df['Answer'].tolist(), batch_size=128, convert_to_tensor=True, device=device).cpu().numpy()
        faiss_index = faiss.IndexFlatL2(answer_embeddings.shape[1])
        faiss_index.add(answer_embeddings)
        # Precompute course similarities
        course_skills = courses_df['skills'].fillna("").tolist()
        course_embeddings = universal_model.encode(course_skills, batch_size=128, convert_to_tensor=True, device=device)
        skill_embeddings = universal_model.encode(questions_df['Skill'].unique().tolist(), batch_size=128, convert_to_tensor=True, device=device)
        course_similarity = util.pytorch_cos_sim(skill_embeddings, course_embeddings).cpu().numpy()
        # Precompute job similarities
        job_skills = jobs_df['required_skills'].fillna("").tolist()
        job_embeddings = universal_model.encode(job_skills, batch_size=128, convert_to_tensor=True, device=device)
        job_similarity = util.pytorch_cos_sim(skill_embeddings, job_embeddings).cpu().numpy()
        # Save precomputed resources
        with open(TFIDF_PATH, 'wb') as f: pickle.dump(tfidf_vectorizer, f)
        with open(SKILL_TFIDF_PATH, 'wb') as f: pickle.dump(skill_tfidf, f)
        with open(QUESTION_ANSWER_PATH, 'wb') as f: pickle.dump(question_to_answer, f)
        faiss.write_index(faiss_index, FAISS_INDEX_PATH)
        with open(ANSWER_EMBEDDINGS_PATH, 'wb') as f: pickle.dump(answer_embeddings, f)
        with open(COURSE_SIMILARITY_PATH, 'wb') as f: pickle.dump(course_similarity, f)
        with open(JOB_SIMILARITY_PATH, 'wb') as f: pickle.dump(job_similarity, f)
        universal_model.save(UNIVERSAL_MODEL_PATH)
        logger.info(f"Precomputed resources saved to {chosen_model_dir}")
    except Exception as e:
        logger.error(f"Error during precomputation: {e}")
        raise
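# Usage sketch (assumed invocation, not part of the original app): since the comment above
# says precomputation is meant to run separately, it can be triggered once from a shell, e.g.
#
#   python -c "import app; app.precompute_resources()"
#
# which builds the TF-IDF vectorizer, FAISS index, and skill/course/job similarity matrices
# and writes them under chosen_model_dir so later requests only need to load them from disk.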
# Evaluation with precomputed data
def evaluate_response(args):
    # Unpack outside the try block so `skill` is always defined for error logging
    skill, user_answer, question_idx = args
    try:
        if not user_answer:
            return skill, 0.0, False
        inputs = detector_tokenizer(user_answer, return_tensors="pt", truncation=True, max_length=512)
        with torch.no_grad():
            logits = detector_model(**inputs).logits
        probs = scipy.special.softmax(logits, axis=1).tolist()[0]
        is_ai = probs[1] > 0.5
        user_embedding = universal_model.encode([user_answer], batch_size=128, convert_to_tensor=True, device="cuda" if torch.cuda.is_available() else "cpu")[0]
        expected_embedding = torch.tensor(answer_embeddings[question_idx])
        score = util.pytorch_cos_sim(user_embedding, expected_embedding).item() * 100
        user_tfidf = tfidf_vectorizer.transform([user_answer]).toarray()[0]
        skill_vec = skill_tfidf.get(skill.lower(), np.zeros_like(user_tfidf))
        relevance = np.dot(user_tfidf, skill_vec) / (np.linalg.norm(user_tfidf) * np.linalg.norm(skill_vec) + 1e-10)
        score *= max(0.5, min(1.0, relevance))
        return skill, round(max(0, score), 2), is_ai
    except Exception as e:
        logger.error(f"Evaluation error for {skill}: {e}")
        return skill, 0.0, False
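# Usage sketch (hypothetical values): evaluate_response takes a (skill, answer, question_idx)
# tuple, where question_idx is the row position of the matched question in questions_df that
# also indexes answer_embeddings, e.g.
#
#   skill, score, is_ai = evaluate_response(("Python", "Use virtual environments and tests.", 3))
#
# It returns the skill name, a 0-100 similarity score scaled by TF-IDF relevance to the skill,
# and a flag from the AI-text detector.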
# Course recommendation with precomputed similarity
def recommend_courses(skills_to_improve, user_level, upgrade=False):
    try:
        if not skills_to_improve or courses_df.empty:
            logger.info("No skills to improve or courses_df is empty.")
            return []
        skill_indices = [list(questions_df['Skill'].unique()).index(skill) for skill in skills_to_improve if skill in questions_df['Skill'].unique()]
        if not skill_indices:
            logger.info("No matching skill indices found.")
            return []
        similarities = course_similarity[skill_indices]
        # Use default arrays to avoid KeyError
        popularity = courses_df['popularity'].values if 'popularity' in courses_df else np.full(len(courses_df), 0.8)
        completion_rate = courses_df['completion_rate'].values if 'completion_rate' in courses_df else np.full(len(courses_df), 0.7)
        total_scores = 0.6 * np.max(similarities, axis=0) + 0.2 * popularity + 0.2 * completion_rate
        target_level = 'Advanced' if upgrade else user_level
        idx = np.argsort(-total_scores)[:5]
        candidates = courses_df.iloc[idx]
        # Filter by level, but fall back to all top candidates if none match
        filtered_candidates = candidates[candidates['level'].str.contains(target_level, case=False, na=False)]
        if filtered_candidates.empty:
            logger.warning(f"No courses found for level {target_level}. Returning top courses regardless of level.")
            filtered_candidates = candidates
        return filtered_candidates[['course_title', 'Organization']].values.tolist()[:3]
    except Exception as e:
        logger.error(f"Course recommendation error: {e}")
        return []
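# Usage sketch (hypothetical call): recommend courses for weak skills at the user's level,
# or pass upgrade=True to bias toward 'Advanced' material:
#
#   recommend_courses(['Python', 'Kubernetes'], 'Intermediate', upgrade=False)
#   # -> [['Python for Data', 'edX'], ['Kubernetes Basics', 'Linux Foundation'], ...]
#
# Each entry is a [course_title, Organization] pair; the sample output above assumes the
# fallback course data defined earlier in this file.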
# Job recommendation with precomputed similarity
def recommend_jobs(user_skills, user_level):
    try:
        if jobs_df.empty:
            return []
        skill_indices = [list(questions_df['Skill'].unique()).index(skill) for skill in user_skills if skill in questions_df['Skill'].unique()]
        if not skill_indices:
            return []
        similarities = job_similarity[skill_indices]
        total_scores = 0.5 * np.max(similarities, axis=0)
        if 'level' not in jobs_df.columns:
            jobs_df['level'] = 'Intermediate'
        level_col = jobs_df['level'].astype(str)
        level_map = {'Beginner': 0, 'Intermediate': 1, 'Advanced': 2}
        user_level_num = level_map.get(user_level, 1)
        level_scores = level_col.apply(lambda x: 1 - abs(level_map.get(x, 1) - user_level_num) / 2)
        location_pref = jobs_df.get('location', pd.Series(['Remote'] * len(jobs_df))).apply(lambda x: 1.0 if x in ['Islamabad', 'Karachi'] else 0.7)
        total_job_scores = total_scores + 0.2 * level_scores + 0.1 * location_pref
        top_job_indices = np.argsort(-total_job_scores)[:5]
        return [(jobs_df.iloc[i]['job_title'], jobs_df.iloc[i]['company_name'],
                 jobs_df.iloc[i].get('location', 'Remote')) for i in top_job_indices]
    except Exception as e:
        logger.error(f"Job recommendation error: {e}")
        return []
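# Usage sketch (hypothetical call): rank jobs against the user's skills and level, blending
# skill similarity, level proximity, and a location preference:
#
#   recommend_jobs(['Linux', 'Kubernetes'], 'Intermediate')
#   # -> [('DevOps Engineer', 'Tech Corp', 'Remote'), ...]
#
# Each entry is a (job_title, company_name, location) tuple; the sample output assumes the
# fallback job data defined earlier in this file.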
# Flask application setup
app = Flask(__name__)
@app.route('/')
def health_check():
    return jsonify({"status": "active", "model_dir": chosen_model_dir})
@app.route('/assess', methods=['POST'])
def assess_skills():
    try:
        data = request.get_json()
        if not data or 'skills' not in data or 'answers' not in data:
            return jsonify({"error": "Missing required fields"}), 400
        user_skills = [s.strip() for s in data['skills'] if isinstance(s, str)]
        answers = [a.strip() for a in data['answers'] if isinstance(a, str)]
        user_level = data.get('user_level', 'Intermediate').strip()
        if len(answers) != len(user_skills):
            return jsonify({"error": "Answers count must match skills count"}), 400
        load_precomputed_resources()  # Load precomputed resources before processing
        user_questions = []
        for skill in user_skills:
            skill_questions = questions_df[questions_df['Skill'] == skill]
            if not skill_questions.empty:
                user_questions.append(skill_questions.sample(1).iloc[0])
            else:
                user_questions.append({
                    'Skill': skill,
                    'Question': f"What are the best practices for using {skill} in a production environment?",
                    'Answer': f"Best practices for {skill} include proper documentation, monitoring, and security measures."
                })
        user_questions = pd.DataFrame(user_questions).reset_index(drop=True)
        user_responses = []
        for idx, row in user_questions.iterrows():
            answer = answers[idx]
            matches = questions_df.index[questions_df['Question'] == row['Question']]
            if not answer or answer.lower() == 'skip' or len(matches) == 0:
                # Generated fallback questions have no precomputed embedding, so treat them like skips
                user_responses.append((row['Skill'], None, None))
            else:
                user_responses.append((row['Skill'], answer, matches[0]))
        results = [evaluate_response(response) for response in user_responses]
        user_scores = {}
        ai_flags = {}
        scores_list = []
        skipped_questions = [f"{row['Skill']} ({row['Question']})"
                             for (_, user_answer, _), (_, row) in zip(user_responses, user_questions.iterrows())
                             if not user_answer]
        for skill, score, is_ai in results:
            if skill in user_scores:
                user_scores[skill] = max(user_scores[skill], score)
                ai_flags[skill] = ai_flags[skill] or is_ai
            else:
                user_scores[skill] = score
                ai_flags[skill] = is_ai
            scores_list.append(score)
        mean_score = float(np.mean(scores_list)) if scores_list else 50.0
        dynamic_threshold = max(40.0, mean_score)
        weak_skills = [skill for skill, score in user_scores.items() if score < dynamic_threshold]
        courses = recommend_courses(weak_skills or user_skills, user_level, upgrade=not weak_skills)
        jobs = recommend_jobs(user_skills, user_level)
        return jsonify({
            "assessment_results": {
                "skills": [
                    {
                        "skill": skill,
                        "progress": f"{'■' * int(score // 10)}{'-' * (10 - int(score // 10))}",
                        "score": f"{score:.2f} %",
                        "origin": "AI-Generated" if is_ai else "Human-Written"
                    } for skill, score, is_ai in results
                ],
                "mean_score": mean_score,
                "dynamic_threshold": dynamic_threshold,
                "weak_skills": weak_skills,
                "skipped_questions": skipped_questions
            },
            "recommended_courses": courses[:3],
            "recommended_jobs": jobs[:5]
        })
    except Exception as e:
        logger.error(f"Assessment error: {e}")
        return jsonify({"error": "Internal server error"}), 500
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860, threaded=True)