# Interview_Questions.py
import os
import io
import logging
import tempfile
import streamlit as st
from zipfile import ZipFile
from langchain.document_loaders import PDFPlumberLoader
from langchain.chat_models import ChatOpenAI
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)
from config import (
    PAGE_TITLE,
    PAGE_ICON,
    SUB_TITLE,
    LAYOUT,
    PROMPTS_MAPPING,
    MODEL_QUESTIONS,
)

openai_api_key = st.secrets["OPENAI_API_KEY"]

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@st.cache_data
def ingest_pdf(resume_file_buffer):
    print("Loading resume...")
    temp_file_path = None
    try:
        # Write the uploaded buffer to a temporary file on disk
        # (NamedTemporaryFile instead of the deprecated, insecure tempfile.mktemp)
        with tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) as temp_file:
            temp_file.write(resume_file_buffer.read())
            temp_file_path = temp_file.name

        # Load the PDF from the temporary file path
        loader = PDFPlumberLoader(temp_file_path)
        documents = loader.load()
        resume_text = " ".join(document.page_content for document in documents)
        print("Resume loaded successfully.")
        return resume_text
    except Exception as e:
        logger.error(f"An error occurred while loading the resume: {e}")
        st.error(f"An error occurred while loading the resume: {e}")
        raise
    finally:
        # Delete the temporary file whether or not loading succeeded
        if temp_file_path and os.path.exists(temp_file_path):
            os.remove(temp_file_path)

def get_parameters(
    selected_job, job_description_input, high_fit_resume_input, low_fit_resume_input
):
    if selected_job == "Input your own":
        job_description = job_description_input
    else:
        selected_prompts = PROMPTS_MAPPING[selected_job]
        job_description = (
            job_description_input
            if job_description_input
            else selected_prompts["job_description"]
        )
    return job_description
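
# For reference, the imported config module is assumed to look roughly like the
# hypothetical sketch below: PROMPTS_MAPPING maps each selectable job title to a dict
# containing at least a "job_description" entry (as indexed by get_parameters above),
# and MODEL_QUESTIONS is the chat model name handed to ChatOpenAI. All concrete values
# here are illustrative placeholders, not the project's real settings.
#
#   # config.py (hypothetical sketch)
#   PAGE_TITLE = "Interview Question Generator"
#   PAGE_ICON = "📝"
#   SUB_TITLE = "Resume-specific interview questions"
#   LAYOUT = "centered"
#   MODEL_QUESTIONS = "gpt-4"
#   PROMPTS_MAPPING = {
#       "CEMM - Senior CPG Account Strategist": {"job_description": "..."},
#       "CEMM - Advertising Assistant": {"job_description": "..."},
#   }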

# TODO: Switch to an OpenAI function-calling LLM call for more reliable response formatting - not an issue for now
@st.cache_data
def get_questions(
    resume_text,
    job_description,
):
    print("Getting questions...")
    llm = ChatOpenAI(
        model=MODEL_QUESTIONS,
        temperature=0.0,
        openai_api_key=openai_api_key,
    )
    # Plain (non-f-string) template: {resume_text} and {job_description} are filled in
    # by format_prompt() below, so braces inside the resume text itself are never
    # mistaken for template variables.
    template = """\
You are an Industrial/Organizational Psychologist who is preparing to analyze an applicant based on a job description and resume,
and to create a selection of interview questions specific to the applicant in order to determine their potential success in the role.

Applicant Resume:
-----------------
{resume_text}
-----------------

Job Key Areas of Responsibility:
-----------------
{job_description}
-----------------

Based on the job description and the information provided in the resume, please respond with an analysis of this applicant and a
selection of interview questions specific to this applicant, designed to better understand whether this person will succeed in this role.

Your Response Format:
Applicant Name
List of positive attributes for the position
List of negative attributes for the position
List of questions for the interview
"""
    user_prompt = HumanMessagePromptTemplate.from_template(template=template)
    chat_prompt = ChatPromptTemplate.from_messages([user_prompt])
    formatted_prompt = chat_prompt.format_prompt(
        resume_text=resume_text,
        job_description=job_description,
    ).to_messages()
    result = llm(formatted_prompt)
    return result.content

def parse_input(file, text_input_key):
    if file:
        resume_file_buffer = io.BytesIO(file.getbuffer())
        return ingest_pdf(resume_file_buffer)
    else:
        return st.session_state[text_input_key]
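
# NOTE: process_resumes() is called in the interface code below but is not defined
# anywhere in this file. The following is a minimal sketch of one possible
# implementation, assuming it should run get_questions() on every uploaded resume for
# the currently selected job and bundle the results into an in-memory ZIP archive.
# The function body, the per-resume ".txt" naming scheme, and the second return value
# (kept so the call site's tuple unpacking works) are assumptions, not part of the
# original code.
def process_resumes(resume_files):
    # Resolve the job description for the currently selected position
    job_description = get_parameters(st.session_state.selected_job, "", None, None)

    # Generate interview questions for each uploaded resume
    results = {}
    for resume_file in resume_files:
        resume_text = ingest_pdf(io.BytesIO(resume_file.getbuffer()))
        results[resume_file.name] = get_questions(resume_text, job_description)

    # Bundle one text file per resume into an in-memory ZIP archive
    zip_buffer = io.BytesIO()
    with ZipFile(zip_buffer, "w") as zip_file:
        for file_name, questions in results.items():
            zip_file.writestr(f"{os.path.splitext(file_name)[0]}_questions.txt", questions)
    return zip_buffer.getvalue(), results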

# Streamlit interface
st.set_page_config(page_title=PAGE_TITLE, page_icon=PAGE_ICON, layout=LAYOUT)
st.markdown(
    f"<h1 style='text-align: center;'>{PAGE_TITLE} {PAGE_ICON} <br> {SUB_TITLE}</h1>",
    unsafe_allow_html=True,
)
st.divider()

def select_job():
    if "selected_job" not in st.session_state:
        st.session_state.selected_job = "CEMM - Senior CPG Account Strategist"

    st.session_state.selected_job = st.selectbox(
        "Select an open position or add your own",
        (
            "CEMM - Senior CPG Account Strategist",
            "CEMM - Advertising Assistant",
            "Input your own",
        ),
        index=0,
        key="job_selection",
    )
    return st.session_state.selected_job

selected_job = select_job()

uploaded_resumes = st.file_uploader(
    "Upload Resumes (PDF files)", type=["pdf"], accept_multiple_files=True
)
start_button = st.button("Generate Questions")

if uploaded_resumes and start_button:
    try:
        zip_data, question_results = process_resumes(uploaded_resumes)
        if zip_data:
            st.download_button(
                label="✨ Download Questions ✨",
                data=zip_data,
                file_name="questions.zip",
                mime="application/zip",
            )
    except Exception as e:
        st.error(f"An error occurred during processing: {e}")
        logger.error(f"An error occurred during processing: {e}")