import streamlit as st
import spacy
import requests
from bs4 import BeautifulSoup
import PyPDF2
from io import BytesIO

# Load SpaCy model for NLP processing
nlp = spacy.load("en_core_web_sm")
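# Note: the small English pipeline must already be installed in the environment,
# e.g. via `python -m spacy download en_core_web_sm`; otherwise spacy.load raises OSError.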
def extract_text_from_pdf(pdf_file):
    pdf_reader = PyPDF2.PdfReader(pdf_file)
    text = ""
    for page in pdf_reader.pages:
        text += page.extract_text() or ""
    return text
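# Note: PyPDF2 only extracts an existing text layer; image-only (scanned) PDFs
# will yield an empty string here, so such resumes would need an OCR step first.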
def extract_skills_and_location(text):
    doc = nlp(text)
    skills = []
    location = None
    # Sample skill keywords; can be expanded for a more comprehensive list
    skill_keywords = ['Python', 'Data Analysis', 'Machine Learning', 'SQL', 'Java', 'Project Management']
    # Match keywords case-insensitively against the full text so that multi-word
    # skills such as "Machine Learning" are detected, not just single tokens
    lowered_text = text.lower()
    for skill in skill_keywords:
        if skill.lower() in lowered_text and skill not in skills:
            skills.append(skill)
    # Use the first GPE entity as the location; spaCy labels cities/countries as GPE (Geopolitical Entity)
    for ent in doc.ents:
        if ent.label_ == 'GPE':
            location = ent.text
            break
    return skills, location
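# Optional, hedged alternative (not called by the app): spaCy's PhraseMatcher can do
# case-insensitive, token-based matching of multi-word skills instead of the plain
# substring check above. The function name below is illustrative only.
from spacy.matcher import PhraseMatcher

def extract_skills_with_phrasematcher(text, skill_keywords):
    matcher = PhraseMatcher(nlp.vocab, attr="LOWER")
    matcher.add("SKILLS", [nlp.make_doc(skill) for skill in skill_keywords])
    doc = nlp(text)
    found = []
    for _, start, end in matcher(doc):
        span_text = doc[start:end].text
        if span_text not in found:
            found.append(span_text)
    return found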
def fetch_job_listings(job_title, location):
    base_url = "https://www.careerjet.co.in/jobs"
    # requests URL-encodes the query string itself, so pass the raw values
    params = {
        's': job_title,
        'l': location
    }
    # A browser-like User-Agent makes it less likely that the request is rejected
    headers = {'User-Agent': 'Mozilla/5.0'}
    try:
        response = requests.get(base_url, params=params, headers=headers, timeout=10)
    except requests.RequestException:
        st.error("Failed to retrieve the job listings.")
        return []
    if response.status_code != 200:
        st.error("Failed to retrieve the job listings.")
        return []
    soup = BeautifulSoup(response.text, 'html.parser')
    job_cards = soup.find_all('article', class_='job clicky')
    job_listings = []
    for job in job_cards:
        # Guard each nested lookup so a missing element does not raise AttributeError
        heading = job.find('h2')
        title_tag = heading.find('a') if heading else None
        company_block = job.find('p', class_='company')
        company_tag = company_block.find('a') if company_block else None
        location_block = job.find('ul', class_='location')
        location_tag = location_block.find('li') if location_block else None
        description_tag = job.find('div', class_='desc')
        date_posted_tag = job.find('span', class_='badge badge-r badge-s badge-icon')
        job_info = {
            'title': title_tag.text.strip() if title_tag else 'N/A',
            'company': company_tag.text.strip() if company_tag else 'N/A',
            'location': location_tag.text.strip() if location_tag else 'N/A',
            'description': description_tag.text.strip() if description_tag else 'N/A',
            'date_posted': date_posted_tag.text.strip() if date_posted_tag else 'N/A',
            'url': f"https://www.careerjet.co.in{title_tag['href']}" if title_tag and title_tag.has_attr('href') else ''
        }
        job_listings.append(job_info)
    return job_listings
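# Note: the CSS classes used above ('job clicky', 'company', 'location', 'desc', the badge
# span) are assumptions about Careerjet's current markup and may need updating if the
# site's HTML changes.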
# Streamlit app
st.title("Resume-Based Job Recommender")

uploaded_file = st.file_uploader("Upload your resume (PDF format)", type=["pdf"])

if uploaded_file is not None:
    # Extract text from the uploaded resume
    resume_text = extract_text_from_pdf(uploaded_file)
    st.text_area("Extracted Resume Text", resume_text, height=300)

    # Extract skills and location
    skills, location = extract_skills_and_location(resume_text)
    st.write("### Extracted Skills")
    st.write(", ".join(skills) if skills else "No skills found.")
    st.write("### Extracted Location")
    st.write(location if location else "No location found.")

    # Prompt user to enter their preferred job title if necessary
    job_title = st.text_input("Enter the job title (e.g., 'Data Scientist')", value=skills[0] if skills else "")
    if st.button("Find Jobs"):
        if location and job_title:
            job_listings = fetch_job_listings(job_title, location)
            if job_listings:
                st.write("### Job Listings")
                for job in job_listings:
                    st.write(f"**Title**: {job['title']}")
                    st.write(f"**Company**: {job['company']}")
                    st.write(f"**Location**: {job['location']}")
                    st.write(f"**Posted**: {job['date_posted']}")
                    st.write(f"**Description**: {job['description']}")
                    st.write(f"[Job Link]({job['url']})")
                    st.write("---")
            else:
                st.write("No job listings found for the given job title and location.")
        else:
            st.warning("Please ensure your resume has a detectable location and skills or enter them manually.")
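# To run locally: streamlit run app.py
# (assumes this file is saved as app.py, the usual entry point for a Streamlit Space)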