From 9c9578cc468de1ae99c1bb2cb961a838c0e5beed Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Fri, 20 Jun 2025 11:14:07 -0700 Subject: [PATCH] Improved resume generation by reordering context --- docker-compose.yml | 23 ++ frontend/src/components/ResumeGenerator.tsx | 21 +- frontend/src/components/ui/JobInfo.tsx | 4 +- frontend/src/components/ui/ResumeInfo.css | 25 +- frontend/src/components/ui/ResumeInfo.tsx | 337 ++++++++++++-------- frontend/src/services/api-client.ts | 26 +- frontend/src/types/types.ts | 11 +- src/backend/agents/generate_resume.py | 56 ++-- src/backend/agents/job_requirements.py | 30 +- src/backend/database/mixins/protocols.py | 48 +-- src/backend/database/mixins/resume.py | 41 ++- src/backend/models.py | 18 +- src/backend/routes/candidates.py | 1 + src/backend/routes/jobs.py | 5 +- src/backend/routes/resumes.py | 60 ++-- src/backend/utils/helpers.py | 4 +- src/backend/utils/llm_proxy.py | 22 +- 17 files changed, 429 insertions(+), 303 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 8ff4e76..e2a71fc 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -123,6 +123,29 @@ services: networks: - internal + ollama-intel: + image: intelanalytics/ipex-llm-inference-cpp-xpu:latest + container_name: ollama + restart: unless-stopped + env_file: + - .env + devices: + - /dev/dri:/dev/dri + volumes: + - ./cache:/root/.cache # Cache hub models and neo_compiler_cache + - ./ollama:/root/.ollama # Cache the ollama models + ports: + - 11434:11434 + environment: + - OLLAMA_HOST=0.0.0.0 + - DEVICE=Arc + - OLLAMA_INTEL_GPU=true + - OLLAMA_NUM_GPU=999 + - ZES_ENABLE_SYSMAN=1 + - ONEAPI_DEVICE_SELECTOR=level_zero:0 + - TZ=America/Los_Angeles + command: sh -c 'mkdir -p /llm/ollama && cd /llm/ollama && init-ollama && exec ./ollama serve' + ollama: build: context: . 
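The ollama-intel service above publishes the stock Ollama REST API on host port 11434, so the backend can reach it exactly like a native Ollama install. A minimal smoke test, assuming the container is up on localhost (the helper name is illustrative; /api/tags is Ollama's documented endpoint for listing pulled models):

    import json
    import urllib.request

    def list_ollama_models(host: str = "http://localhost:11434") -> list[str]:
        """Return the names of the models the Ollama container has pulled."""
        with urllib.request.urlopen(f"{host}/api/tags") as response:
            payload = json.load(response)
        return [model["name"] for model in payload.get("models", [])]

    if __name__ == "__main__":
        print(list_ollama_models())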
diff --git a/frontend/src/components/ResumeGenerator.tsx b/frontend/src/components/ResumeGenerator.tsx index a3c8c83..336d68a 100644 --- a/frontend/src/components/ResumeGenerator.tsx +++ b/frontend/src/components/ResumeGenerator.tsx @@ -12,6 +12,7 @@ import { StatusBox, StatusIcon } from './ui/StatusIcon'; import { CopyBubble } from './CopyBubble'; import { useAppState } from 'hooks/GlobalContext'; import { StreamingOptions } from 'services/api-client'; +import { Navigate, useNavigate } from 'react-router-dom'; interface ResumeGeneratorProps { job: Job; @@ -23,6 +24,7 @@ interface ResumeGeneratorProps { const ResumeGenerator: React.FC = (props: ResumeGeneratorProps) => { const { job, candidate, skills, onComplete } = props; const { setSnack } = useAppState(); + const navigate = useNavigate(); const { apiClient, user } = useAuth(); const [resume, setResume] = useState(''); const [prompt, setPrompt] = useState(''); @@ -49,9 +51,10 @@ const ResumeGenerator: React.FC = (props: ResumeGeneratorP const generateResumeHandlers: StreamingOptions = { onMessage: (message: Types.ChatMessageResume) => { - setSystemPrompt(message.systemPrompt || ''); - setPrompt(message.prompt || ''); - setResume(message.resume || ''); + const resume: Types.Resume = message.resume; + setSystemPrompt(resume.systemPrompt || ''); + setPrompt(resume.prompt || ''); + setResume(resume.resume || ''); setStatus(''); }, onStreaming: (chunk: Types.ChatMessageStreaming) => { @@ -115,10 +118,18 @@ const ResumeGenerator: React.FC = (props: ResumeGeneratorP setSnack('Candidate or job ID is missing.'); return; } - const controller = apiClient.saveResume(candidate.id, job.id, resume); + const submission: Types.Resume = { + jobId: job.id, + candidateId: candidate.id, + resume, + systemPrompt, + prompt, + }; + const controller = apiClient.saveResume(submission); const result = await controller.promise; if (result.resume.id) { setSnack('Resume saved successfully!'); + navigate(`/candidate/resumes/${result.resume.id}`); } } catch (error) { console.error('Error saving resume:', error); @@ -177,7 +188,7 @@ const ResumeGenerator: React.FC = (props: ResumeGeneratorP {resume && !status && !error && ( )} diff --git a/frontend/src/components/ui/JobInfo.tsx b/frontend/src/components/ui/JobInfo.tsx index c8bacce..c0d110b 100644 --- a/frontend/src/components/ui/JobInfo.tsx +++ b/frontend/src/components/ui/JobInfo.tsx @@ -44,9 +44,8 @@ interface JobInfoProps { const JobInfo: React.FC = (props: JobInfoProps) => { const { setSnack } = useAppState(); - const { job } = props; const { user, apiClient } = useAuth(); - const { sx, action = '', elevation = 1, variant = 'normal' } = props; + const { sx, action = '', elevation = 1, variant = 'normal', job } = props; const theme = useTheme(); const isMobile = useMediaQuery(theme.breakpoints.down('md')) || variant === 'minimal'; const isAdmin = user?.isAdmin; @@ -236,6 +235,7 @@ const JobInfo: React.FC = (props: JobInfoProps) => { return ( = (props: ResumeInfoProps) => { const isMobile = useMediaQuery(theme.breakpoints.down('md')) || variant === 'minimal'; const isAdmin = user?.isAdmin; const [activeResume, setActiveResume] = useState({ ...resume }); - const [isContentExpanded, setIsContentExpanded] = useState(false); - const [shouldShowMoreButton, setShouldShowMoreButton] = useState(false); const [deleted, setDeleted] = useState(false); const [editDialogOpen, setEditDialogOpen] = useState(false); const [printDialogOpen, setPrintDialogOpen] = useState(false); const [editContent, setEditContent] = useState(''); + 
const [editSystemPrompt, setEditSystemPrompt] = useState('');
+ const [editPrompt, setEditPrompt] = useState('');
  const [saving, setSaving] = useState(false);
- const contentRef = useRef(null);
  const [tabValue, setTabValue] = useState('markdown');
+ const [status, setStatus] = useState('');
+ const [statusType, setStatusType] = useState<Types.ApiActivityType | null>(null);
+ const [error, setError] = useState<Types.ChatMessageError | null>(null);
+ const printContentRef = useRef<HTMLDivElement>(null);
  const reactToPrintFn = useReactToPrint({
    contentRef: printContentRef,
@@ -92,13 +103,6 @@ const ResumeInfo: React.FC<ResumeInfoProps> = (props: ResumeInfoProps) => {
  }, [resume, activeResume]);
  // Check if content needs truncation
- useEffect(() => {
-   if (contentRef.current && resume.resume) {
-     const element = contentRef.current;
-     setShouldShowMoreButton(element.scrollHeight > element.clientHeight);
-   }
- }, [resume.resume]);
-
  const deleteResume = async (id: string | undefined) => {
    if (id) {
      try {
@@ -118,11 +122,17 @@ const ResumeInfo: React.FC<ResumeInfoProps> = (props: ResumeInfoProps) => {
  const handleSave = async () => {
    setSaving(true);
    try {
-     const result = await apiClient.updateResume(activeResume.id || '', editContent);
-     const updatedResume = {
+     const resumeUpdate = {
        ...activeResume,
        resume: editContent,
-       updatedAt: new Date(),
+       systemPrompt: editSystemPrompt,
+       prompt: editPrompt,
+     };
+     const result = await apiClient.updateResume(resumeUpdate);
+     console.log('Resume updated:', result);
+     const updatedResume = {
+       ...activeResume,
+       ...result
      };
      setActiveResume(updatedResume);
      setSnack('Resume updated successfully.');
@@ -135,6 +145,8 @@ const ResumeInfo: React.FC<ResumeInfoProps> = (props: ResumeInfoProps) => {
  const handleEditOpen = () => {
    setEditContent(activeResume.resume);
+   setEditSystemPrompt(activeResume.systemPrompt || '');
+   setEditPrompt(activeResume.prompt || '');
    setEditDialogOpen(true);
  };
@@ -144,13 +156,60 @@ const ResumeInfo: React.FC<ResumeInfoProps> = (props: ResumeInfoProps) => {
  const formatDate = (date: Date | undefined) => {
    if (!date) return 'N/A';
-   return new Intl.DateTimeFormat('en-US', {
-     month: 'short',
-     day: 'numeric',
-     year: 'numeric',
-     hour: '2-digit',
-     minute: '2-digit',
-   }).format(date);
+   try {
+     return new Intl.DateTimeFormat('en-US', {
+       month: 'short',
+       day: 'numeric',
+       year: 'numeric',
+       hour: '2-digit',
+       minute: '2-digit',
+     }).format(date);
+   } catch (error) {
+     console.error('Error formatting date:', error);
+     return 'Invalid date';
+   }
+ };
+
+ const generateResumeHandlers: StreamingOptions = {
+   onMessage: (message: Types.ChatMessageResume) => {
+     const resume: Resume = message.resume;
+     setEditSystemPrompt(resume.systemPrompt || '');
+     setEditPrompt(resume.prompt || '');
+     setEditContent(resume.resume);
+     setActiveResume({ ...resume });
+     setStatus('');
+     setSnack('Resume generation completed successfully.');
+   },
+   onStreaming: (chunk: Types.ChatMessageStreaming) => {
+     if (status === '') {
+       setStatus('Generating resume...');
+       setStatusType('generating');
+     }
+     setEditContent(chunk.content);
+   },
+   onStatus: (status: Types.ChatMessageStatus) => {
+     console.log('status:', status.content);
+     setStatusType(status.activity);
+     setStatus(status.content);
+   },
+   onError: (error: Types.ChatMessageError) => {
+     console.log('error:', error);
+     setStatusType(null);
+     setStatus(error.content);
+     setError(error);
+   },
+ };
+
+ const generateResume = async (): Promise<void> => {
+   setStatusType('thinking');
+   setStatus('Starting resume generation...');
+   setActiveResume({ ...activeResume, resume: '' }); // Reset resume content
+   const request = await apiClient.generateResume(
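+     // apiClient.generateResume streams events back through the handlers above:
+     // onStatus for progress updates, onStreaming for partial content, onMessage
+     // for the final Resume, and onError on failure (as wired up in this patch).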
+ activeResume.candidateId || '', + activeResume.jobId || '', + generateResumeHandlers + ); + await request.promise; }; const handleTabChange = (event: React.SyntheticEvent, newValue: string) => { @@ -158,6 +217,12 @@ const ResumeInfo: React.FC = (props: ResumeInfoProps) => { reactToPrintFn(); return; } + if (newValue === 'regenerate') { + // Handle resume regeneration logic here + setSnack('Regenerating resume...'); + generateResume(); + return; + } setTabValue(newValue); }; @@ -281,48 +346,23 @@ const ResumeInfo: React.FC = (props: ResumeInfoProps) => { /> - - {activeResume.resume} - - - {shouldShowMoreButton && variant !== 'all' && ( - - + + + {activeResume.resume} - )} + @@ -451,81 +491,124 @@ const ResumeInfo: React.FC = (props: ResumeInfoProps) => { height: '100%', }} > - - } label="Markdown" /> - } label="Preview" /> - } label="Job" /> - } label="Print" /> - - *:not(.Scrollable)': { - flexShrink: 0 /* Prevent shrinking */, - }, - position: 'relative', - }} - > - {tabValue === 'markdown' && ( - setEditContent(value)} - style={{ - position: 'relative', - // maxHeight: "100%", - height: '100%', - width: '100%', + + + + } label="Markdown" /> + {activeResume.systemPrompt && } label="System Prompt" />} + {activeResume.systemPrompt && } label="Prompt" />} + } label="Preview" /> + } label="Print" /> + } label="Regenerate" /> + + {status && ( + + + {statusType && } + + {status || 'Processing...'} + + + {status && !error && } + + )} + *:not(.Scrollable)': { + flexShrink: 0 /* Prevent shrinking */, + }, + position: 'relative', }} - placeholder="Enter resume content..." - /> - )} - {tabValue === 'preview' && ( - - - + {tabValue === 'markdown' && ( + setEditContent(value)} + style={{ position: 'relative', - maxHeight: '100%', + maxHeight: "100%", + height: '100%', width: '100%', display: 'flex', + minHeight: '100%', + flexGrow: 1, flex: 1 /* Take remaining space in some-container */, - // overflowY: 'auto' /* Scroll if content overflows */, + overflowY: 'auto' /* Scroll if content overflows */, }} - content={editContent} + placeholder="Enter resume content..." /> - -   - - )} - {tabValue === 'job' && activeResume.job && ( - - )} + )} + {tabValue === 'systemPrompt' && ( + setEditSystemPrompt(value)} + style={{ + position: 'relative', + maxHeight: "100%", + // height: '100%', + width: '100%', + display: 'flex', + minHeight: '100%', + flexGrow: 1, + flex: 1 /* Take remaining space in some-container */, + overflowY: 'auto' /* Scroll if content overflows */, + }} + placeholder="Edit system prompt..." + /> + )} + {tabValue === 'prompt' && ( + setEditPrompt(value)} + style={{ + position: 'relative', + maxHeight: "100%", + height: '100%', + width: '100%', + display: 'flex', + minHeight: '100%', + + flexGrow: 1, + flex: 1 /* Take remaining space in some-container */, + overflowY: 'auto' /* Scroll if content overflows */, + }} + placeholder="Edit prompt..." 
+ /> + )} + {tabValue === 'preview' && ( + + + + +   + + )} + + + + + {activeResume.job !== undefined && } + + diff --git a/frontend/src/services/api-client.ts b/frontend/src/services/api-client.ts index 328dfc6..1936595 100644 --- a/frontend/src/services/api-client.ts +++ b/frontend/src/services/api-client.ts @@ -699,18 +699,11 @@ class ApiClient { } saveResume( - candidate_id: string, - job_id: string, - resume: string, + resume: Types.Resume, streamingOptions?: StreamingOptions ): StreamingResponse { - const body = JSON.stringify(resume); - return this.streamify( - `/resumes/${candidate_id}/${job_id}`, - body, - streamingOptions, - 'Resume' - ); + const body = JSON.stringify(formatApiRequest(resume)); + return this.streamify(`/resumes`, body, streamingOptions, 'Resume'); } // Additional API methods for Resume management @@ -810,11 +803,12 @@ class ApiClient { return handleApiResponse<{ success: boolean; statistics: any }>(response); } - async updateResume(resumeId: string, content: string): Promise { - const response = await fetch(`${this.baseUrl}/resumes/${resumeId}`, { - method: 'PUT', + async updateResume(resume: Types.Resume): Promise { + const body = JSON.stringify(formatApiRequest(resume)); + const response = await fetch(`${this.baseUrl}/resumes`, { + method: 'PATCH', headers: this.defaultHeaders, - body: JSON.stringify(content), + body: body, }); return this.handleApiResponseWithConversion(response, 'Resume'); @@ -1524,7 +1518,9 @@ class ApiClient { case 'done': const message = ( - modelType ? convertFromApi(incoming, modelType) : incoming + modelType + ? convertFromApi(parseApiResponse(incoming), modelType) + : incoming ) as T; finalMessage = message; try { diff --git a/frontend/src/types/types.ts b/frontend/src/types/types.ts index d46116e..3171691 100644 --- a/frontend/src/types/types.ts +++ b/frontend/src/types/types.ts @@ -1,6 +1,6 @@ // Generated TypeScript types from Pydantic models // Source: src/backend/models.py -// Generated on: 2025-06-18T22:54:34.823060 +// Generated on: 2025-06-19T22:17:35.101284 // DO NOT EDIT MANUALLY - This file is auto-generated // ============================ @@ -354,9 +354,7 @@ export interface ChatMessageResume { content: string; tunables?: Tunables; metadata: ChatMessageMetaData; - resume: string; - systemPrompt?: string; - prompt?: string; + resume: Resume; } export interface ChatMessageSkillAssessment { @@ -976,6 +974,8 @@ export interface Resume { jobId: string; candidateId: string; resume: string; + systemPrompt?: string; + prompt?: string; createdAt?: Date; updatedAt?: Date; job?: Job; @@ -1377,6 +1377,7 @@ export function convertChatMessageRagSearchFromApi(data: any): ChatMessageRagSea /** * Convert ChatMessageResume from API response * Date fields: timestamp + * Nested models: resume (Resume) */ export function convertChatMessageResumeFromApi(data: any): ChatMessageResume { if (!data) return data; @@ -1385,6 +1386,8 @@ export function convertChatMessageResumeFromApi(data: any): ChatMessageResume { ...data, // Convert timestamp from ISO string to Date timestamp: data.timestamp ? 
new Date(data.timestamp) : undefined, + // Convert nested Resume model + resume: convertResumeFromApi(data.resume), }; } /** diff --git a/src/backend/agents/generate_resume.py b/src/backend/agents/generate_resume.py index ab95884..e14c513 100644 --- a/src/backend/agents/generate_resume.py +++ b/src/backend/agents/generate_resume.py @@ -19,6 +19,7 @@ from models import ( ChatMessageError, ChatMessageResume, ChatMessageStatus, + Resume, SkillAssessment, SkillStrength, ) @@ -63,16 +64,13 @@ class GenerateResume(Agent): if skill and strength in skills_by_strength: skills_by_strength[strength].append(skill) - # Collect experience evidence + if skill not in experience_evidence: + experience_evidence[skill] = [] + # Collect experience evidence, grouped by skill for evidence in assessment.evidence_details: - source = evidence.source - if source: - if source not in experience_evidence: - experience_evidence[source] = [] - - experience_evidence[source].append( - {"skill": skill, "quote": evidence.quote, "context": evidence.context} - ) + experience_evidence[skill].append( + {"source": evidence.source, "quote": evidence.quote, "context": evidence.context} + ) # Build the system prompt system_prompt = f"""You are a professional resume writer with expertise in highlighting candidate strengths and experiences. @@ -91,21 +89,21 @@ Phone: {self.user.phone or 'N/A'} system_prompt += f"""\ ### Strong Skills (prominent in resume): -{", ".join(skills_by_strength[SkillStrength.STRONG])} +* {".\n* ".join(skills_by_strength[SkillStrength.STRONG])} """ if len(skills_by_strength[SkillStrength.MODERATE]): system_prompt += f"""\ ### Moderate Skills (demonstrated in resume): -{", ".join(skills_by_strength[SkillStrength.MODERATE])} +* {".\n* ".join(skills_by_strength[SkillStrength.MODERATE])} """ if len(skills_by_strength[SkillStrength.WEAK]): system_prompt += f"""\ ### Weaker Skills (mentioned or implied): -{", ".join(skills_by_strength[SkillStrength.WEAK])} +* {".\n* ".join(skills_by_strength[SkillStrength.WEAK])} """ system_prompt += """\ @@ -114,10 +112,19 @@ Phone: {self.user.phone or 'N/A'} """ # Add experience evidence by source/position - for source, evidences in experience_evidence.items(): - system_prompt += f"\n### {source}:\n" + for skill, evidences in experience_evidence.items(): + system_prompt += f"\n### {skill}:\n" + last_source = None + index = 0 + sub_index = 1 for evidence in evidences: - system_prompt += f"- {evidence['skill']}: {evidence['context']}\n" + if last_source != evidence['source']: + index += 1 + last_source = evidence['source'] + system_prompt += f"{index}. Source: {last_source}:\n" + sub_index = 1 + system_prompt += f" {index}.{sub_index}. Quote: \"{evidence['quote']}\"\n Evidence: {evidence['context']}\n" + sub_index += 1 # Add instructions for the resume creation system_prompt += """\ @@ -132,8 +139,8 @@ When sections lack data, output "Information not provided" or use placeholder te 2. Format the resume in a clean, concise, and modern style that will pass ATS systems. 3. Include these sections: - Professional Summary (highlight strongest skills and experience level) - - Skills (organized by strength, under a single section). When listing skills, rephrase them so they are not identical to the original assessment. - - Professional Experience (focus on achievements and evidence of the skill) + - Skills (organized by strength, under a single section). When listing skills, rephrase them so they are not identical to the original assessment. 
Do not list the strengths explicitly, but rather integrate them into the skills section.
+ - Professional Experience (focus on achievements and evidence of the skills). For the skills listed, identify supporting content from the EXPERIENCE EVIDENCE and summarize experiences with specific, detailed achievements where possible, ordered by date and job.
4. Optional sections, to include only if evidence is present:
- Education section
- Certifications section
@@ -169,6 +176,11 @@ Format it in clean, ATS-friendly markdown. Provide ONLY the resume with no comme
    async def generate_resume(
        self, llm: Any, model: str, session_id: str, skills: List[SkillAssessment]
    ) -> AsyncGenerator[ApiMessage, None]:
+       if not self.user:
+           error_message = ChatMessageError(session_id=session_id, content="User must be set before generating resume.")
+           logger.error(f"⚠️ {error_message.content}")
+           yield error_message
+           return
        # Stage 1A: Analyze job requirements
        status_message = ChatMessageStatus(
            session_id=session_id, content="Analyzing job requirements", activity=ApiActivityType.THINKING
@@ -208,9 +220,13 @@ Format it in clean, ATS-friendly markdown. Provide ONLY the resume with no comme
            status=ApiStatusType.DONE,
            content="Resume generation completed successfully.",
            metadata=generated_message.metadata,
-           resume=generated_message.content,
-           prompt=prompt,
-           system_prompt=system_prompt,
+           resume=Resume(
+               job_id="N/A",
+               candidate_id=self.user.id,
+               resume=generated_message.content,
+               prompt=prompt,
+               system_prompt=system_prompt
+           )
        )
        yield resume_message
        logger.info("✅ Resume generation completed successfully.")
diff --git a/src/backend/agents/job_requirements.py b/src/backend/agents/job_requirements.py
index 2c62a24..102f944 100644
--- a/src/backend/agents/job_requirements.py
+++ b/src/backend/agents/job_requirements.py
@@ -39,24 +39,25 @@ class JobRequirementsAgent(Agent):
        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
        system_prompt = """
You are an objective job requirements analyzer. Your task is to extract and categorize the specific skills,
-experiences, and qualifications required in a job description WITHOUT any reference to any candidate.
+experiences, and qualifications required in a job description.
## INSTRUCTIONS:
-1. Analyze ONLY the job description provided.
+1. Analyze ONLY the <|job_description|> provided, and provide only requirements from that description.
2. Extract company information, job title, and all requirements.
-3. If a requirement is compound (e.g., "5+ years experience with React, Node.js and MongoDB" or "FastAPI/Django/React"), break it down into individual components.
-4. Categorize requirements into:
+3. If a requirement can be broken into multiple requirements, do so.
+4. Categorize each requirement into one and only one of the following categories:
- Technical skills (required and preferred)
- Experience requirements (required and preferred)
-- Education requirements
-- Soft skills
-- Industry knowledge
-- Responsibilities
+- Soft skills (e.g., "excellent communication skills")
+- Education
+- Certifications (e.g., "AWS Certified Solutions Architect")
+- Preferred attributes (e.g., "team player", "self-motivated")
- Company values
5. Extract and categorize all requirements and preferences.
-6. DO NOT consider any candidate information - this is a pure job analysis task.
-7. Provide the output in a structured JSON format as specified below.
+6. Provide the output in a structured JSON format as specified below.
+7. 
If there are no requirements in a category, leave it as an empty list. ## OUTPUT FORMAT: @@ -85,17 +86,10 @@ experiences, and qualifications required in a job description WITHOUT any refere ``` Be specific and detailed in your extraction. -If a requirement can be broken down into several separate requirements, split them. -For example, the technical_skill of "Python/Django/FastAPI" should be separated into different requirements: Python, Django, and FastAPI. - -For example, if the job description mentions: "Python/Django/FastAPI", you should extract it as: - -"technical_skills": { "required": [ "Python", "Django", "FastAPI" ] }, - Avoid vague categorizations and be precise about whether skills are explicitly required or just preferred. """ - prompt = f"Job Description:\n{job_description}" + prompt = f"<|job_description|>\n{job_description}\n\n" return system_prompt, prompt async def analyze_job_requirements( diff --git a/src/backend/database/mixins/protocols.py b/src/backend/database/mixins/protocols.py index eb8cabb..028a202 100644 --- a/src/backend/database/mixins/protocols.py +++ b/src/backend/database/mixins/protocols.py @@ -5,7 +5,7 @@ from redis.asyncio import Redis if TYPE_CHECKING: pass -from models import SkillAssessment +from models import Resume, SkillAssessment class DatabaseProtocol(Protocol): @@ -172,9 +172,6 @@ class DatabaseProtocol(Protocol): async def delete_all_candidate_documents(self, candidate_id: str) -> int: ... - async def delete_all_resumes_for_user(self, user_id: str) -> int: - ... - async def delete_authentication(self, user_id: str) -> bool: ... @@ -190,8 +187,6 @@ class DatabaseProtocol(Protocol): async def delete_job(self, job_id: str): ... - async def delete_resume(self, user_id: str, resume_id: str) -> bool: - ... async def delete_viewer(self, viewer_id: str): ... @@ -229,12 +224,6 @@ class DatabaseProtocol(Protocol): async def get_all_jobs(self) -> Dict[str, Any]: ... - async def get_all_resumes_for_user(self, user_id: str) -> List[Dict]: - ... - - async def get_all_resumes(self) -> Dict[str, List[Dict]]: - ... - async def get_authentication(self, user_id: str) -> Optional[Dict[str, Any]]: ... @@ -304,13 +293,34 @@ class DatabaseProtocol(Protocol): async def get_refresh_token(self, token: str) -> Optional[Dict[str, Any]]: ... - async def get_resumes_by_candidate(self, user_id: str, candidate_id: str) -> List[Dict]: + async def search_resumes_for_user(self, user_id: str, query: str) -> List[Resume]: ... - async def get_resumes_by_job(self, user_id: str, job_id: str) -> List[Dict]: + async def set_resume(self, user_id: str, resume_data: Dict) -> bool: ... - async def get_resume(self, user_id: str, resume_id: str) -> Optional[Dict]: + async def delete_all_resumes_for_user(self, user_id: str) -> int: + ... + + async def get_all_resumes_for_user(self, user_id: str) -> List[Dict]: + ... + + async def update_resume(self, user_id: str, resume_id: str, updates: Dict) -> Optional[Resume]: + ... + + async def delete_resume(self, user_id: str, resume_id: str) -> bool: + ... + + async def get_all_resumes(self) -> Dict[str, List[Dict]]: + ... + + async def get_resumes_by_candidate(self, user_id: str, candidate_id: str) -> List[Resume]: + ... + + async def get_resumes_by_job(self, user_id: str, job_id: str) -> List[Resume]: + ... + + async def get_resume(self, user_id: str, resume_id: str) -> Optional[Resume]: ... 
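+    # The resume accessors above now return validated Resume models instead of
+    # raw dicts, giving callers typed attribute access. Illustrative usage only,
+    # not part of this patch:
+    #
+    #   resume = await db.get_resume(user_id, resume_id)
+    #   if resume is not None:
+    #       print(resume.job_id, resume.candidate_id, resume.updated_at)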
    async def get_resume_statistics(self, user_id: str) -> Dict[str, Any]:
@@ -364,9 +374,6 @@ class DatabaseProtocol(Protocol):
    async def search_chat_messages(self, session_id: str, query: str) -> List[Dict]:
        ...
-   async def search_resumes_for_user(self, user_id: str, query: str) -> List[Dict]:
-       ...
-
    async def set_ai_parameters(self, param_id: str, param_data: Dict):
        ...
@@ -388,9 +395,6 @@ class DatabaseProtocol(Protocol):
    async def set_job(self, job_id: str, job_data: Dict):
        ...
-   async def set_resume(self, user_id: str, resume_data: Dict) -> bool:
-       ...
-
    async def set_viewer(self, viewer_id: str, viewer_data: Dict):
        ...
@@ -414,5 +418,3 @@ class DatabaseProtocol(Protocol):
    async def update_document(self, document_id: str, updates: Dict) -> Dict[Any, Any] | None:
        ...
-   async def update_resume(self, user_id: str, resume_id: str, updates: Dict) -> Optional[Dict]:
-       ...
diff --git a/src/backend/database/mixins/resume.py b/src/backend/database/mixins/resume.py
index 9e4672f..ed9647e 100644
--- a/src/backend/database/mixins/resume.py
+++ b/src/backend/database/mixins/resume.py
@@ -2,6 +2,8 @@ from datetime import UTC, datetime
import logging
from typing import Any, Dict, List, Optional
+from models import Resume
+
from .protocols import DatabaseProtocol
from ..constants import KEY_PREFIXES
@@ -14,27 +16,23 @@ class ResumeMixin(DatabaseProtocol):
    async def set_resume(self, user_id: str, resume_data: Dict) -> bool:
        """Save a resume for a user"""
        try:
-           # Generate resume_id if not present
-           if "id" not in resume_data:
-               raise ValueError("Resume data must include an 'id' field")
-
-           resume_id = resume_data["id"]
+           resume = Resume.model_validate(resume_data)
+           # Persist the validated dump so server-generated defaults (e.g. a new
+           # id) are stored under the same key they are indexed by
+           resume_data = resume.model_dump(mode="json")
            # Store the resume data
-           key = f"{KEY_PREFIXES['resumes']}{user_id}:{resume_id}"
+           key = f"{KEY_PREFIXES['resumes']}{user_id}:{resume.id}"
            await self.redis.set(key, self._serialize(resume_data))
            # Add resume_id to user's resume list
            user_resumes_key = f"{KEY_PREFIXES['user_resumes']}{user_id}"
-           await self.redis.rpush(user_resumes_key, resume_id)  # type: ignore
+           await self.redis.rpush(user_resumes_key, resume.id)  # type: ignore
-           logger.info(f"📄 Saved resume {resume_id} for user {user_id}")
+           logger.info(f"📄 Saved resume {resume.id} for user {user_id}")
            return True
        except Exception as e:
            logger.error(f"❌ Error saving resume for user {user_id}: {e}")
            return False
-   async def get_resume(self, user_id: str, resume_id: str) -> Optional[Dict]:
+   async def get_resume(self, user_id: str, resume_id: str) -> Optional[Resume]:
        """Get a specific resume for a user"""
        try:
            key = f"{KEY_PREFIXES['resumes']}{user_id}:{resume_id}"
@@ -42,7 +40,7 @@ class ResumeMixin(DatabaseProtocol):
            if data:
                resume_data = self._deserialize(data)
                logger.info(f"📄 Retrieved resume {resume_id} for user {user_id}")
-               return resume_data
+               return Resume.model_validate(resume_data)
            logger.info(f"📄 Resume {resume_id} not found for user {user_id}")
            return None
        except Exception as e:
@@ -178,7 +176,7 @@ class ResumeMixin(DatabaseProtocol):
            logger.error(f"❌ Error retrieving all resumes: {e}")
            return {}
-   async def search_resumes_for_user(self, user_id: str, query: str) -> List[Dict]:
+   async def search_resumes_for_user(self, user_id: str, query: str) -> List[Resume]:
        """Search resumes for a user by content, job title, or candidate name"""
        try:
            all_resumes = await self.get_all_resumes_for_user(user_id)
@@ -200,16 +198,16 @@ class ResumeMixin(DatabaseProtocol):
                    matching_resumes.append(resume)
            logger.info(f"📄 Found {len(matching_resumes)} matching resumes for user {user_id}")
-           return 
matching_resumes + return [Resume.model_validate(resume) for resume in matching_resumes] except Exception as e: logger.error(f"❌ Error searching resumes for user {user_id}: {e}") return [] - async def get_resumes_by_candidate(self, user_id: str, candidate_id: str) -> List[Dict]: + async def get_resumes_by_candidate(self, user_id: str, candidate_id: str) -> List[Resume]: """Get all resumes for a specific candidate created by a user""" try: all_resumes = await self.get_all_resumes_for_user(user_id) - candidate_resumes = [resume for resume in all_resumes if resume.get("candidate_id") == candidate_id] + candidate_resumes = [Resume.model_validate(resume) for resume in all_resumes if resume.get("candidate_id") == candidate_id] logger.info(f"📄 Found {len(candidate_resumes)} resumes for candidate {candidate_id} by user {user_id}") return candidate_resumes @@ -217,11 +215,11 @@ class ResumeMixin(DatabaseProtocol): logger.error(f"❌ Error retrieving resumes for candidate {candidate_id} by user {user_id}: {e}") return [] - async def get_resumes_by_job(self, user_id: str, job_id: str) -> List[Dict]: + async def get_resumes_by_job(self, user_id: str, job_id: str) -> List[Resume]: """Get all resumes for a specific job created by a user""" try: all_resumes = await self.get_all_resumes_for_user(user_id) - job_resumes = [resume for resume in all_resumes if resume.get("job_id") == job_id] + job_resumes = [Resume.model_validate(resume) for resume in all_resumes if resume.get("job_id") == job_id] logger.info(f"📄 Found {len(job_resumes)} resumes for job {job_id} by user {user_id}") return job_resumes @@ -274,19 +272,20 @@ class ResumeMixin(DatabaseProtocol): "recent_resumes": [], } - async def update_resume(self, user_id: str, resume_id: str, updates: Dict) -> Optional[Dict]: + async def update_resume(self, user_id: str, resume_id: str, updates: Dict) -> Optional[Resume]: """Update specific fields of a resume""" try: resume_data = await self.get_resume(user_id, resume_id) if resume_data: - resume_data.update(updates) - resume_data["updated_at"] = datetime.now(UTC).isoformat() + resume_dict = resume_data.model_dump() + resume_dict.update(updates) + resume_dict["updated_at"] = datetime.now(UTC).isoformat() key = f"{KEY_PREFIXES['resumes']}{user_id}:{resume_id}" - await self.redis.set(key, self._serialize(resume_data)) + await self.redis.set(key, self._serialize(resume_dict)) logger.info(f"📄 Updated resume {resume_id} for user {user_id}") - return resume_data + return Resume.model_validate(resume_dict) return None except Exception as e: logger.error(f"❌ Error updating resume {resume_id} for user {user_id}: {e}") diff --git a/src/backend/models.py b/src/backend/models.py index b06e0d7..4c7da03 100644 --- a/src/backend/models.py +++ b/src/backend/models.py @@ -1090,26 +1090,24 @@ class ChatMessageSkillAssessment(ChatMessageUser): skill_assessment: SkillAssessment = Field(..., alias=str("skillAssessment")) -class ChatMessageResume(ChatMessageUser): - role: ChatSenderType = ChatSenderType.ASSISTANT - metadata: ChatMessageMetaData = Field(default=ChatMessageMetaData()) - resume: str = Field(..., alias=str("resume")) - system_prompt: Optional[str] = Field(default=None, alias=str("systemPrompt")) - prompt: Optional[str] = Field(default=None, alias=str("prompt")) - model_config = ConfigDict(populate_by_name=True) - - class Resume(BaseModel): id: str = Field(default_factory=lambda: str(uuid.uuid4())) job_id: str = Field(..., alias=str("jobId")) candidate_id: str = Field(..., alias=str("candidateId")) - resume: str = 
Field(..., alias=str("resume")) + resume: str + system_prompt: Optional[str] = Field(default=None) + prompt: Optional[str] = Field(default=None) created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("createdAt")) updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("updatedAt")) job: Optional[Job] = None candidate: Optional[Candidate] = None model_config = ConfigDict(populate_by_name=True) +class ChatMessageResume(ChatMessageUser): + role: ChatSenderType = ChatSenderType.ASSISTANT + metadata: ChatMessageMetaData = Field(default=ChatMessageMetaData()) + resume: Resume + model_config = ConfigDict(populate_by_name=True) class ResumeMessage(ChatMessageUser): role: ChatSenderType = ChatSenderType.ASSISTANT diff --git a/src/backend/routes/candidates.py b/src/backend/routes/candidates.py index 4104668..f73264f 100644 --- a/src/backend/routes/candidates.py +++ b/src/backend/routes/candidates.py @@ -1850,6 +1850,7 @@ async def generate_resume( return resume: ChatMessageResume = final_message + resume.resume.job_id = job.id yield resume return diff --git a/src/backend/routes/jobs.py b/src/backend/routes/jobs.py index bef9715..2a3a9a1 100644 --- a/src/backend/routes/jobs.py +++ b/src/backend/routes/jobs.py @@ -17,7 +17,7 @@ from markitdown import MarkItDown, StreamInfo import backstory_traceback as backstory_traceback import defines from agents.base import CandidateEntity -from utils.helpers import create_job_from_content, filter_and_paginate, get_document_type_from_filename +from utils.helpers import filter_and_paginate, get_document_type_from_filename from database.manager import RedisDatabase from logger import logger from models import ( @@ -67,7 +67,8 @@ async def reformat_as_markdown(database: RedisDatabase, candidate_entity: Candid prompt=content, system_prompt=""" You are a document editor. Take the provided job description and reformat as legible markdown. -Return only the markdown content, no other text. Make sure all content is included. +Return only the markdown content, no other text. Make sure all content is included. If the +content is already in markdown format, return it as is. 
""", ): pass diff --git a/src/backend/routes/resumes.py b/src/backend/routes/resumes.py index 1bb628a..b32cc6e 100644 --- a/src/backend/routes/resumes.py +++ b/src/backend/routes/resumes.py @@ -2,8 +2,8 @@ Resume Routes """ import json -from datetime import datetime, UTC from typing import List +import uuid from fastapi import APIRouter, HTTPException, Depends, Body, Path, Query from fastapi.responses import StreamingResponse @@ -18,37 +18,39 @@ from utils.responses import create_success_response # Create router for authentication endpoints router = APIRouter(prefix="/resumes", tags=["resumes"]) - -@router.post("/{candidate_id}/{job_id}") +@router.post("") async def create_candidate_resume( - candidate_id: str = Path(..., description="ID of the candidate"), - job_id: str = Path(..., description="ID of the job"), - resume_content: str = Body(...), + resume: Resume = Body(...), current_user=Depends(get_current_user), database: RedisDatabase = Depends(get_database), ): """Create a new resume for a candidate/job combination""" - async def message_stream_generator(): - logger.info(f"🔍 Looking up candidate and job details for {candidate_id}/{job_id}") + # Ignore the resume ID if provided, generate a new one + resume.id = str(uuid.uuid4()) - candidate_data = await database.get_candidate(candidate_id) + logger.info(f"📝 Creating resume for candidate {resume.candidate_id} for job {resume.job_id}") + + async def message_stream_generator(): + logger.info(f"🔍 Looking up candidate and job details for {resume.candidate_id}/{resume.job_id}") + + candidate_data = await database.get_candidate(resume.candidate_id) if not candidate_data: - logger.error(f"❌ Candidate with ID '{candidate_id}' not found") + logger.error(f"❌ Candidate with ID '{resume.candidate_id}' not found") error_message = ChatMessageError( session_id=MOCK_UUID, # No session ID for document uploads - content=f"Candidate with ID '{candidate_id}' not found", + content=f"Candidate with ID '{resume.candidate_id}' not found", ) yield error_message return candidate = Candidate.model_validate(candidate_data) - job_data = await database.get_job(job_id) + job_data = await database.get_job(resume.job_id) if not job_data: - logger.error(f"❌ Job with ID '{job_id}' not found") + logger.error(f"❌ Job with ID '{resume.job_id}' not found") error_message = ChatMessageError( session_id=MOCK_UUID, # No session ID for document uploads - content=f"Job with ID '{job_id}' not found", + content=f"Job with ID '{resume.job_id}' not found", ) yield error_message return @@ -58,12 +60,6 @@ async def create_candidate_resume( f"📄 Saving resume for candidate {candidate.first_name} {candidate.last_name} for job '{job.title}'" ) - # Job and Candidate are valid. 
Save the resume - resume = Resume( - job_id=job_id, - candidate_id=candidate_id, - resume=resume_content, - ) resume_message: ResumeMessage = ResumeMessage( session_id=MOCK_UUID, # No session ID for document uploads resume=resume, @@ -240,29 +236,27 @@ async def get_resume_statistics( logger.error(f"❌ Error retrieving resume statistics for user {current_user.id}: {e}") raise HTTPException(status_code=500, detail="Failed to retrieve resume statistics") - -@router.put("/{resume_id}") +@router.patch("") async def update_resume( - resume_id: str = Path(..., description="ID of the resume"), - resume: str = Body(..., description="Updated resume content"), + resume: Resume = Body(...), current_user=Depends(get_current_user), database: RedisDatabase = Depends(get_database), ): """Update the content of a specific resume""" try: - updates = {"resume": resume, "updated_at": datetime.now(UTC).isoformat()} - - updated_resume_data = await database.update_resume(current_user.id, resume_id, updates) + updates = resume.model_dump() + updated_resume_data = await database.update_resume(current_user.id, resume.id, updates) if not updated_resume_data: - logger.warning(f"⚠️ Resume {resume_id} not found for user {current_user.id}") + logger.warning(f"⚠️ Resume {resume.id} not found for user {current_user.id}") raise HTTPException(status_code=404, detail="Resume not found") updated_resume = Resume.model_validate(updated_resume_data) if updated_resume_data else None - - return create_success_response( - {"success": True, "message": f"Resume {resume_id} updated successfully", "resume": updated_resume} - ) + if not updated_resume: + logger.warning(f"⚠️ Resume {resume.id} could not be updated for user {current_user.id}") + raise HTTPException(status_code=400, detail="Failed to update resume") + return create_success_response(updated_resume.model_dump(by_alias=True)) + except HTTPException: raise except Exception as e: - logger.error(f"❌ Error updating resume {resume_id} for user {current_user.id}: {e}") + logger.error(f"❌ Error updating resume {resume.id} for user {current_user.id}: {e}") raise HTTPException(status_code=500, detail="Failed to update resume") diff --git a/src/backend/utils/helpers.py b/src/backend/utils/helpers.py index a1073a5..e4ba74b 100644 --- a/src/backend/utils/helpers.py +++ b/src/backend/utils/helpers.py @@ -13,10 +13,8 @@ from fastapi.responses import StreamingResponse import defines from logger import logger from models import DocumentType -from models import Job, ChatMessage, DocumentType, ApiStatusType +from models import Job, ChatMessage, ApiStatusType -from typing import List, Dict -from models import Job import utils.llm_proxy as llm_manager diff --git a/src/backend/utils/llm_proxy.py b/src/backend/utils/llm_proxy.py index 4684ded..4edf13f 100644 --- a/src/backend/utils/llm_proxy.py +++ b/src/backend/utils/llm_proxy.py @@ -230,7 +230,7 @@ class OllamaAdapter(BaseLLMAdapter): else: response = await self.client.chat(model=model, messages=ollama_messages, stream=False, **kwargs) - usage_stats = self._create_usage_stats(response) + usage_stats = self._create_usage_stats(response.model_dump()) return ChatResponse( content=response["message"]["content"], @@ -267,7 +267,7 @@ class OllamaAdapter(BaseLLMAdapter): else: response = await self.client.generate(model=model, prompt=prompt, stream=False, **kwargs) - usage_stats = self._create_usage_stats(response) + usage_stats = self._create_usage_stats(response.model_dump()) return ChatResponse( content=response["response"], model=model, 
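+            # Note: response is a typed ollama-python object here, which is why
+            # the usage stats above are computed from response.model_dump()
+            # rather than from the raw object.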
finish_reason=response.get("done_reason"), usage=usage_stats @@ -312,7 +312,7 @@ class OllamaAdapter(BaseLLMAdapter): # Create usage stats if available from the last response usage_stats = None if final_response and len(results) == 1: - usage_stats = self._create_usage_stats(final_response) + usage_stats = self._create_usage_stats(final_response.model_dump()) return EmbeddingResponse(data=results, model=model, usage=usage_stats) @@ -326,7 +326,7 @@ class OpenAIAdapter(BaseLLMAdapter): def __init__(self, **config): super().__init__(**config) - import openai + import openai # type: ignore self.client = openai.AsyncOpenAI(api_key=config.get("api_key", os.getenv("OPENAI_API_KEY"))) @@ -425,7 +425,7 @@ class AnthropicAdapter(BaseLLMAdapter): def __init__(self, **config): super().__init__(**config) - import anthropic + import anthropic # type: ignore self.client = anthropic.AsyncAnthropic(api_key=config.get("api_key", os.getenv("ANTHROPIC_API_KEY"))) @@ -524,7 +524,7 @@ class GeminiAdapter(BaseLLMAdapter): def __init__(self, **config): super().__init__(**config) - import google.generativeai as genai + import google.generativeai as genai # type: ignore genai.configure(api_key=config.get("api_key", os.getenv("GEMINI_API_KEY"))) self.genai = genai @@ -720,7 +720,8 @@ class UnifiedLLMProxy: if stream is False: raise ValueError("stream must be True for chat_stream") result = await self.chat(model, messages, provider, stream=True, **kwargs) - # Type checker now knows this is an AsyncGenerator due to stream=True + if isinstance(result, ChatResponse): + raise RuntimeError("Expected AsyncGenerator, got ChatResponse") async for chunk in result: yield chunk @@ -734,7 +735,8 @@ class UnifiedLLMProxy: """Get single chat response using specified or default provider""" result = await self.chat(model, messages, provider, stream=False, **kwargs) - # Type checker now knows this is a ChatResponse due to stream=False + if not isinstance(result, ChatResponse): + raise RuntimeError("Expected ChatResponse, got AsyncGenerator") return result async def generate( @@ -753,6 +755,8 @@ class UnifiedLLMProxy: """Stream text generation using specified or default provider""" result = await self.generate(model, prompt, provider, stream=True, **kwargs) + if isinstance(result, ChatResponse): + raise RuntimeError("Expected AsyncGenerator, got ChatResponse") async for chunk in result: yield chunk @@ -762,6 +766,8 @@ class UnifiedLLMProxy: """Get single generation response using specified or default provider""" result = await self.generate(model, prompt, provider, stream=False, **kwargs) + if not isinstance(result, ChatResponse): + raise RuntimeError("Expected ChatResponse, got AsyncGenerator") return result async def embeddings(