bangaboy commited on
Commit
df99062
·
verified ·
1 Parent(s): 0312e4a

Create main.py

Browse files
Files changed (1) hide show
  1. main.py +97 -0
main.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+ from transformers import LayoutLMv3ForTokenClassification, LayoutLMv3FeatureExtractor, LayoutLMv3Tokenizer
4
+ from PIL import Image
5
+ import pytesseract
6
+ from pdf2image import convert_from_path
7
+ import re
8
+
9
+ # Ensure you have the necessary dependencies installed:
10
+ # pip install transformers torch Pillow pytesseract pdf2image
11
+
12
# Set up pytesseract path (adjust as needed)
pytesseract.pytesseract.tesseract_cmd = r'/usr/bin/tesseract'

# Load pre-trained model and tokenizer
model_name = "microsoft/layoutlmv3-base"
# NOTE(review): "layoutlmv3-base" ships no fine-tuned token-classification
# head, so the 5-label head created here is randomly initialized — its
# predictions are meaningless until the model is fine-tuned on labeled
# resume data. Confirm whether a fine-tuned checkpoint was intended.
model = LayoutLMv3ForTokenClassification.from_pretrained(model_name, num_labels=5)  # Adjust num_labels as needed
feature_extractor = LayoutLMv3FeatureExtractor.from_pretrained(model_name)
tokenizer = LayoutLMv3Tokenizer.from_pretrained(model_name)

# Define label mapping
# 0 is the conventional "outside any entity" (O) tag; 1-4 are the entity
# types this script extracts.
id2label = {0: "O", 1: "COMPANY", 2: "EDUCATION", 3: "POSITION", 4: "DATE"}
label2id = {v: k for k, v in id2label.items()}
def preprocess_document(file_path):
    """Load a PDF or image file and OCR it into words with bounding boxes.

    Args:
        file_path: Path to a resume as a PDF or an image file.

    Returns:
        Tuple ``(image, words, boxes)`` where ``words`` is a list of
        non-empty OCR tokens and ``boxes`` is a parallel list of
        ``[x0, y0, x1, y1]`` pixel coordinates.
    """
    if file_path.lower().endswith('.pdf'):
        # Process only the first page for simplicity; multi-page support
        # would require looping over all rendered pages.
        images = convert_from_path(file_path)
        image = images[0]
    else:
        image = Image.open(file_path)

    # Perform word-level OCR.
    ocr_result = pytesseract.image_to_data(image, output_type=pytesseract.Output.DICT)

    words = []
    boxes = []
    for i, word in enumerate(ocr_result['text']):
        # Tesseract's TSV output interleaves empty "text" entries for
        # structural rows (pages, blocks, paragraphs, lines). The original
        # code passed these empty tokens downstream; skip them so that
        # words/boxes contain only real tokens.
        if not word.strip():
            continue
        x = ocr_result['left'][i]
        y = ocr_result['top'][i]
        w = ocr_result['width'][i]
        h = ocr_result['height'][i]
        words.append(word)
        boxes.append([x, y, x + w, y + h])

    return image, words, boxes
def process_resume(file_path):
    """Classify each word of a resume and group labeled spans by entity type.

    Args:
        file_path: Path to a resume as a PDF or an image file.

    Returns:
        Dict mapping each entity type ("COMPANY", "EDUCATION", "POSITION",
        "DATE") to a list of extracted text spans.
    """
    image, words, boxes = preprocess_document(file_path)

    # Encode words + boxes with the tokenizer. The original code passed
    # words/boxes to the feature extractor, but LayoutLMv3FeatureExtractor
    # only processes images and does not produce input_ids/bbox — the
    # tokenizer is what builds the text encoding.
    encoding = tokenizer(text=words, boxes=boxes, truncation=True, return_tensors="pt")

    # Inference only: disable gradient tracking.
    with torch.no_grad():
        outputs = model(
            input_ids=encoding["input_ids"],
            attention_mask=encoding["attention_mask"],
            bbox=encoding["bbox"],
        )

    # Token-level predicted label ids for the single (batch of 1) sequence.
    predictions = outputs.logits.argmax(-1).squeeze(0).tolist()

    # Map token-level predictions back to word level. The original zipped
    # `words` directly with token predictions, which misaligns as soon as a
    # word splits into subwords or special tokens are added. A word takes
    # the label of its first sub-token; special tokens have word_id None.
    word_ids = encoding.word_ids(batch_index=0)
    word_label = {}
    for token_idx, word_id in enumerate(word_ids):
        if word_id is not None and word_id not in word_label:
            word_label[word_id] = predictions[token_idx]

    # Group consecutive same-label words into entity spans.
    parsed_info = {"COMPANY": [], "EDUCATION": [], "POSITION": [], "DATE": []}
    current_entity = None
    current_text = ""

    for idx, word in enumerate(words):
        label_id = word_label.get(idx, 0)  # words truncated away count as 'O'
        if label_id != 0:  # Not 'O' label
            label = id2label[label_id]
            if label != current_entity:
                if current_entity:
                    parsed_info[current_entity].append(current_text.strip())
                current_entity = label
                current_text = word + " "
            else:
                current_text += word + " "
        else:
            if current_entity:
                parsed_info[current_entity].append(current_text.strip())
            current_entity = None
            current_text = ""

    # Flush a trailing entity — the original dropped an entity that ended
    # on the final word.
    if current_entity:
        parsed_info[current_entity].append(current_text.strip())

    return parsed_info
def main():
    """Prompt for a resume path, parse it, and print the extracted fields."""
    resume_path = input("Enter the path to your resume file (PDF or image): ")

    # Guard clause: bail out early on a bad path.
    if not os.path.exists(resume_path):
        print("File not found. Please check the path and try again.")
        return

    parsed_info = process_resume(resume_path)

    # Data-driven report: one (heading, entity-key) pair per output line.
    sections = (
        ("Companies worked for:", "COMPANY"),
        ("Education:", "EDUCATION"),
        ("Positions held:", "POSITION"),
        ("Relevant dates:", "DATE"),
    )
    print("\nExtracted Information:")
    for heading, key in sections:
        print(heading, ", ".join(parsed_info[key]))

if __name__ == "__main__":
    main()