diff --git a/src/utils/agents/base.py b/src/utils/agents/base.py
index a5c890a..bb40755 100644
--- a/src/utils/agents/base.py
+++ b/src/utils/agents/base.py
@@ -287,7 +287,7 @@ class Agent(BaseModel, ABC):
         message.metadata["timers"]["llm_with_tools"] = f"{(end_time - start_time):.4f}"
         return
 
-    async def generate_llm_response(self, llm: Any, model: str, message: Message) -> AsyncGenerator[Message, None]:
+    async def generate_llm_response(self, llm: Any, model: str, message: Message, temperature: float = 0.7) -> AsyncGenerator[Message, None]:
         logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
 
         self.metrics.generate_count.labels(agent=self.agent_type).inc()
@@ -312,7 +312,7 @@ class Agent(BaseModel, ABC):
         message.metadata["options"]={
             "seed": 8911,
             "num_ctx": self.context_size,
-            #"temperature": 0.9, # Higher temperature to encourage tool usage
+            "temperature": temperature,  # Temperature is supplied by the caller (default 0.7)
         }
 
         # Create a dict for storing various timing stats
diff --git a/src/utils/agents/job_description.py b/src/utils/agents/job_description.py
index 6e19af4..604f7ba 100644
--- a/src/utils/agents/job_description.py
+++ b/src/utils/agents/job_description.py
@@ -1,15 +1,19 @@
 from __future__ import annotations
-from pydantic import model_validator # type: ignore
-from typing import Literal, ClassVar, Optional, Any, AsyncGenerator, List # NOTE: You must import Optional for late binding to work
+from pydantic import model_validator, Field # type: ignore
+from typing import Dict, Literal, ClassVar, Optional, Any, AsyncGenerator, List # NOTE: You must import Optional for late binding to work
 from datetime import datetime
 import inspect
+import re
+import json
+import traceback
 
-from . base import Agent, agent_registry
+from . base import Agent, agent_registry, LLMMessage
 from .. conversation import Conversation
 from .. message import Message
 from .. setup_logging import setup_logging
 logger = setup_logging()
+
 system_generate_resume = """
 You are an objective skills analyzer for resume tailoring. Your task is to analyze a job description and a candidate's background, identifying ONLY
@@ -95,33 +99,50 @@ When answering queries, follow these steps:
 - Avoid phrases like 'According to the <|context|>' or similar references to the <|context|>, <|job_description|>, <|resume|>, or <|context|> tags.
 """.strip()
 
+system_user_qualifications = """
+You are an objective job requirements analyzer. Your task is to extract and categorize the specific skills, experiences, and qualifications required in a job description WITHOUT any reference to any candidate.
+
+## INSTRUCTIONS:
+
+1. Analyze ONLY the job description provided.
+2. Extract and categorize all requirements and preferences.
+3. DO NOT consider any candidate information - this is a pure job analysis task.
+
+## OUTPUT FORMAT:
+
+```json
+{
+  "job_requirements": {
+    "technical_skills": {
+      "required": ["skill1", "skill2"...],
+      "preferred": ["skill1", "skill2"...]
+    },
+    "experience_requirements": {
+      "required": ["exp1", "exp2"...],
+      "preferred": ["exp1", "exp2"...]
+    },
+    "education_requirements": ["req1", "req2"...],
+    "soft_skills": ["skill1", "skill2"...],
+    "industry_knowledge": ["knowledge1", "knowledge2"...],
+    "responsibilities": ["resp1", "resp2"...],
+    "company_values": ["value1", "value2"...]
+  }
+}
+```
+
+Be specific and detailed in your extraction. Break down compound requirements into individual components.
+For example, "5+ years experience with React, Node.js and MongoDB" should be separated into: +- Experience: "5+ years software development" +- Technical skills: "React", "Node.js", "MongoDB" + +Avoid vague categorizations and be precise about whether skills are explicitly required or just preferred. + + +<|job_description|> +[INSERT JOB DESCRIPTION HERE] + +""" -# def run_resume_pipeline(job_description, resume, context): -# # Stage 1: Analysis -# analysis_prompt = create_analysis_prompt(job_description, resume, context) -# job_analysis = call_llm_with_prompt(analysis_prompt) - -# # Validate analysis output -# validated_analysis = validate_job_analysis(job_analysis) -# if not validated_analysis: -# return {"error": "Analysis stage failed validation"} - -# # Stage 2: Generation -# generation_prompt = create_generation_prompt(validated_analysis, resume) -# tailored_resume = call_llm_with_prompt(generation_prompt) - -# # Stage 3: Verification -# verification_prompt = create_verification_prompt(validated_analysis, resume, context, tailored_resume) -# verification_result = call_llm_with_prompt(verification_prompt) - -# # Process verification results -# if verification_result.get("verification_results", {}).get("overall_assessment") == "APPROVED": -# return {"status": "success", "resume": tailored_resume} -# else: -# # Optional: Implement correction loop -# corrected_resume = apply_corrections(tailored_resume, verification_result) -# return {"status": "corrected", "resume": corrected_resume} - class JobDescription(Agent): agent_type: Literal["job_description"] = "job_description" # type: ignore _agent_type: ClassVar[str] = agent_type # Add this for registration @@ -129,6 +150,9 @@ class JobDescription(Agent): system_prompt: str = system_generate_resume job_description: str + llm: Any = Field(default=None, exclude=True) + model: str = Field(default=None, exclude=True) + @model_validator(mode="after") def validate_job_description(self): if not self.job_description.strip(): @@ -181,6 +205,8 @@ class JobDescription(Agent): if message.status != "done": yield message + self.system_prompt = system_user_qualifications + resume_agent = self.context.get_agent(agent_type="resume") if not resume_agent: # Switch agent from "Create Resume from Job Desription" mode @@ -190,14 +216,806 @@ class JobDescription(Agent): # Instantiate the "resume" agent, and seed (or reset) its conversation # with this message. resume_agent = self.context.get_or_create_agent(agent_type="resume", resume=message.response) - first_resume_message = message.copy() + first_resume_message = message.model_copy() first_resume_message.prompt = "Generate a resume for the job description." resume_agent.conversation.add(first_resume_message) message.response = "Resume generated." 
- + # Return the final message yield message return + # BEGIN + + # Helper functions + def extract_json_from_text(self, text: str) -> str: + """Extract JSON string from text that may contain other content.""" + json_pattern = r'```json\s*([\s\S]*?)\s*```' + match = re.search(json_pattern, text) + if match: + return match.group(1).strip() + + # Try to find JSON without the markdown code block + json_pattern = r'({[\s\S]*})' + match = re.search(json_pattern, text) + if match: + return match.group(1).strip() + + raise ValueError("No JSON found in the response") + + def validate_job_requirements(self, job_requirements: Dict) -> None: + """Validate the structure of job requirements.""" + required_keys = ["job_requirements"] + + if not all(key in job_requirements for key in required_keys): + missing = [key for key in required_keys if key not in job_requirements] + raise ValueError(f"Missing required keys in job requirements: {missing}") + + # Additional validation can be added here + + def validate_candidate_qualifications(self, candidate_qualifications: Dict) -> None: + """Validate the structure of candidate qualifications.""" + required_keys = ["candidate_qualifications"] + + if not all(key in candidate_qualifications for key in required_keys): + missing = [key for key in required_keys if key not in candidate_qualifications] + raise ValueError(f"Missing required keys in candidate qualifications: {missing}") + + # Additional validation can be added here + + def validate_skills_mapping(self, skills_mapping: Dict) -> None: + """Validate the structure of skills mapping.""" + required_keys = ["skills_mapping", "resume_recommendations"] + + if not all(key in skills_mapping for key in required_keys): + missing = [key for key in required_keys if key not in skills_mapping] + raise ValueError(f"Missing required keys in skills mapping: {missing}") + + # Additional validation can be added here + + def extract_header_from_resume(self, resume: str) -> str: + """Extract header information from the original resume.""" + # Simple implementation - in practice, you might want a more sophisticated approach + lines = resume.strip().split("\n") + # Take the first few non-empty lines as the header + header_lines = [] + for line in lines[:10]: # Arbitrarily choose first 10 lines to search + if line.strip(): + header_lines.append(line) + if len(header_lines) >= 4: # Assume header is no more than 4 lines + break + return "\n".join(header_lines) + + # Stage 1A: Job Analysis Implementation + def create_job_analysis_prompt(self, job_description: str) -> tuple[str, str]: + """Create the prompt for job requirements analysis.""" + system_prompt = """ + You are an objective job requirements analyzer. Your task is to extract and categorize the specific skills, + experiences, and qualifications required in a job description WITHOUT any reference to any candidate. + + ## INSTRUCTIONS: + + 1. Analyze ONLY the job description provided. + 2. Extract and categorize all requirements and preferences. + 3. DO NOT consider any candidate information - this is a pure job analysis task. 
+ + ## OUTPUT FORMAT: + + ```json + { + "job_requirements": { + "technical_skills": { + "required": ["skill1", "skill2"], + "preferred": ["skill1", "skill2"] + }, + "experience_requirements": { + "required": ["exp1", "exp2"], + "preferred": ["exp1", "exp2"] + }, + "education_requirements": ["req1", "req2"], + "soft_skills": ["skill1", "skill2"], + "industry_knowledge": ["knowledge1", "knowledge2"], + "responsibilities": ["resp1", "resp2"], + "company_values": ["value1", "value2"] + } + } + ``` + + Be specific and detailed in your extraction. Break down compound requirements into individual components. + For example, "5+ years experience with React, Node.js and MongoDB" should be separated into: + - Experience: "5+ years software development" + - Technical skills: "React", "Node.js", "MongoDB" + + Avoid vague categorizations and be precise about whether skills are explicitly required or just preferred. + """ + + prompt = f"Job Description:\n{job_description}" + return system_prompt, prompt + + async def analyze_job_requirements(self, message, job_description: str) -> AsyncGenerator[Message, None]: + """Analyze job requirements from job description.""" + try: + system_prompt, prompt = self.create_job_analysis_prompt(job_description) + async for message in self.call_llm(message, system_prompt, prompt): + if message.status != "done": + yield message + if message.status == "error": + return + + # Extract JSON from response + json_str = self.extract_json_from_text(message.response) + job_requirements = json.loads(json_str) + + self.validate_job_requirements(job_requirements) + + message.status = "done" + message.response = json_str + yield message + return + + except Exception as e: + message.status = "error" + message.response = f"Error in job requirements analysis: {str(e)}" + logger.error(message.response) + logger.error(traceback.format_exc()) + yield message + raise + + # Stage 1B: Candidate Analysis Implementation + def create_candidate_analysis_prompt(self, resume: str, context: str) -> tuple[str, str]: + """Create the prompt for candidate qualifications analysis.""" + system_prompt = """ + You are an objective resume analyzer. Your task is to catalog ALL skills, experiences, and qualifications + present in a candidate's materials WITHOUT any reference to any job description. + + ## INSTRUCTIONS: + + 1. Analyze ONLY the candidate's resume and context provided. + 2. Create a comprehensive inventory of the candidate's actual qualifications. + 3. DO NOT consider any job requirements - this is a pure candidate analysis task. + 4. For each qualification, cite exactly where in the materials it appears. 
+ + ## OUTPUT FORMAT: + + ```json + { + "candidate_qualifications": { + "technical_skills": [ + { + "skill": "skill name", + "evidence": "exact quote from materials", + "source": "resume or context", + "expertise_level": "explicit level mentioned or 'unspecified'" + } + ], + "work_experience": [ + { + "role": "job title", + "company": "company name", + "duration": "time period", + "responsibilities": ["resp1", "resp2"], + "technologies_used": ["tech1", "tech2"], + "achievements": ["achievement1", "achievement2"] + } + ], + "education": [ + { + "degree": "degree name", + "institution": "institution name", + "completed": true/false, + "evidence": "exact quote from materials" + } + ], + "projects": [ + { + "name": "project name", + "description": "brief description", + "technologies_used": ["tech1", "tech2"], + "evidence": "exact quote from materials" + } + ], + "soft_skills": [ + { + "skill": "skill name", + "evidence": "exact quote or inference basis", + "source": "resume or context" + } + ] + } + } + ``` + + Be thorough and precise. Include ONLY skills and experiences explicitly mentioned in the materials. + For each entry, provide the exact text evidence from the materials that supports its inclusion. + Do not make assumptions about skills based on job titles or project names - only include skills explicitly mentioned. + """ + + prompt = f"Resume:\n{resume}\n\nAdditional Context:\n{context}" + return system_prompt, prompt + + async def call_llm(self, message: Message, system_prompt, prompt, temperature=0.7): + messages : List[LLMMessage] = [ + LLMMessage(role="system", content=system_prompt), + LLMMessage(role="user", content=prompt) + ] + message.metadata["options"]={ + "seed": 8911, + "num_ctx": self.context_size, + "temperature": temperature # Higher temperature to encourage tool usage + } + + message.status = "streaming" + yield message + + message.response = "" + for response in self.llm.chat( + model=self.model, + messages=messages, + options={ + **message.metadata["options"], + }, + stream=True, + ): + if not response: + message.status = "error" + message.response = "No response from LLM." 
+                yield message
+                return
+
+            message.status = "streaming"
+            message.response += response.message.content
+
+            if not response.done:
+                yield message
+
+            if response.done:
+                message.metadata["eval_count"] += response.eval_count
+                message.metadata["eval_duration"] += response.eval_duration
+                message.metadata["prompt_eval_count"] += response.prompt_eval_count
+                message.metadata["prompt_eval_duration"] += response.prompt_eval_duration
+                self.context_tokens = response.prompt_eval_count + response.eval_count
+                message.status = "done"
+                yield message
+
+    async def analyze_candidate_qualifications(self, message: Message, resume: str, context: str) -> AsyncGenerator[Message, None]:
+        """Analyze candidate qualifications from resume and context."""
+        try:
+            system_prompt, prompt = self.create_candidate_analysis_prompt(resume, context)
+            async for message in self.call_llm(message, system_prompt, prompt):
+                if message.status != "done":
+                    yield message
+                if message.status == "error":
+                    return
+
+            # Extract JSON from response
+            json_str = self.extract_json_from_text(message.response)
+            candidate_qualifications = json.loads(json_str)
+
+            # Validate structure
+            self.validate_candidate_qualifications(candidate_qualifications)
+            message.status = "done"
+            message.response = json.dumps(candidate_qualifications)
+            yield message
+            return
+
+        except Exception as e:
+            message.status = "error"
+            message.response = f"Error in candidate qualifications analysis: {str(e)}"
+            logger.error(message.response)
+            logger.error(traceback.format_exc())
+            yield message
+            raise
+
+    # Stage 1C: Mapping Analysis Implementation
+    def create_mapping_analysis_prompt(self, job_requirements: Dict, candidate_qualifications: Dict) -> tuple[str, str]:
+        """Create the prompt for mapping analysis."""
+        system_prompt = """
+        You are an objective skills mapper. Your task is to identify legitimate matches between job requirements
+        and candidate qualifications WITHOUT fabricating or stretching the truth.
+
+        ## INSTRUCTIONS:
+
+        1. Use ONLY the structured job requirements and candidate qualifications provided.
+        2. Create a mapping that shows where the candidate's actual skills and experiences align with job requirements.
+        3. Identify gaps where the candidate lacks required qualifications.
+        4. Suggest legitimate transferable skills ONLY when there is reasonable evidence.
+
+        ## OUTPUT FORMAT:
+
+        ```json
+        {
+          "skills_mapping": {
+            "direct_matches": [
+              {
+                "job_requirement": "required skill",
+                "candidate_qualification": "matching skill",
+                "evidence": "exact quote from candidate materials"
+              }
+            ],
+            "transferable_skills": [
+              {
+                "job_requirement": "required skill",
+                "candidate_qualification": "transferable skill",
+                "reasoning": "explanation of legitimate transferability",
+                "evidence": "exact quote from candidate materials"
+              }
+            ],
+            "gap_analysis": {
+              "missing_required_skills": ["skill1", "skill2"],
+              "missing_preferred_skills": ["skill1", "skill2"],
+              "missing_experience": ["exp1", "exp2"]
+            }
+          },
+          "resume_recommendations": {
+            "highlight_points": [
+              {
+                "qualification": "candidate's qualification",
+                "relevance": "why this is highly relevant to the job"
+              }
+            ],
+            "transferable_narratives": [
+              {
+                "from": "candidate's actual experience",
+                "to": "job requirement",
+                "suggested_framing": "how to honestly present the transfer"
+              }
+            ],
+            "honest_limitations": [
+              "frank assessment of major qualification gaps"
+            ]
+          }
+        }
+        ```
+
+        CRITICAL RULES:
+        1. A "direct match" requires the EXACT SAME skill in both job requirements and candidate qualifications
+        2. A "transferable skill" must have legitimate, defensible connection - do not stretch credibility
+        3. All "missing_required_skills" MUST be acknowledged - do not ignore major gaps
+        4. Every match or transfer claim must cite specific evidence from the candidate materials
+        5. Be conservative in claiming transferability - when in doubt, list as missing
+        """
+
+        prompt = f"Job Requirements:\n{json.dumps(job_requirements, indent=2)}\n\n"
+        prompt += f"Candidate Qualifications:\n{json.dumps(candidate_qualifications, indent=2)}"
+        return system_prompt, prompt
+
+    async def create_skills_mapping(self, message, job_requirements: Dict, candidate_qualifications: Dict) -> AsyncGenerator[Message, None]:
+        """Create mapping between job requirements and candidate qualifications."""
+        try:
+            system_prompt, prompt = self.create_mapping_analysis_prompt(job_requirements, candidate_qualifications)
+            async for message in self.call_llm(message, system_prompt, prompt):
+                if message.status != "done":
+                    yield message
+                if message.status == "error":
+                    return
+
+            # Extract JSON from response
+            json_str = self.extract_json_from_text(message.response)
+            skills_mapping = json.loads(json_str)
+
+            # Validate structure
+            self.validate_skills_mapping(skills_mapping)
+
+            message.status = "done"
+            message.response = json_str
+            yield message
+            return
+
+        except Exception as e:
+            message.status = "error"
+            message.response = f"Error in skills mapping analysis: {str(e)}"
+            logger.error(message.response)
+            logger.error(traceback.format_exc())
+            yield message
+            raise
+
+    # Stage 2: Resume Generation Implementation
+    def create_resume_generation_prompt(self, skills_mapping: Dict, candidate_qualifications: Dict, original_header: str) -> tuple[str, str]:
+        """Create the prompt for resume generation."""
+        system_prompt = """
+        You are a professional resume writer whose primary concern is FACTUAL ACCURACY. Your task is to create
+        a tailored resume that presents the candidate's actual qualifications in the most relevant way for this job,
+        using ONLY information that has been verified in the skills mapping.
+
+        ## INSTRUCTIONS:
+
+        1. Use ONLY the information provided in the skills mapping JSON
+        2. Each skill, experience, or achievement you include MUST appear in either "direct_matches" or "transferable_skills"
+        3. DO NOT include skills listed in "missing_required_skills" or "missing_preferred_skills"
+        4. Format a professional resume with these sections:
+           - Header with name and contact information (exactly as provided in original resume)
+           - Professional Summary (focused on verified matching and transferable skills)
+           - Skills (ONLY from "direct_matches" and "transferable_skills" sections)
+           - Professional Experience (highlighting experiences referenced in the mapping)
+           - Education (exactly as listed in the candidate qualifications)
+
+        5. Follow these principles:
+           - Use the exact wording from "highlight_points" and "transferable_narratives" when describing experiences
+           - Maintain original job titles, companies, and dates exactly as provided
+           - Use achievement-oriented language that emphasizes results and impact
+           - Prioritize experiences that directly relate to the job requirements
+
+        ## EVIDENCE REQUIREMENT:
+
+        For each skill or qualification you include in the resume, you MUST be able to trace it to:
+        1. A specific entry in "direct_matches" or "transferable_skills", AND
+        2. The original evidence citation in the candidate qualifications
+
+        If you cannot meet both these requirements for any content, DO NOT include it.
+
+        ## FORMAT REQUIREMENTS:
+
+        - Create a clean, professional resume format
+        - Use consistent formatting for similar elements
+        - Ensure readability with appropriate white space
+        - Use bullet points for skills and achievements
+        - Include a final note: "Note: Initial draft of the resume was generated using the Backstory application written by James Ketrenos."
+
+        ## FINAL VERIFICATION:
+
+        Before completing the resume:
+        1. Check that EVERY skill listed appears in either "direct_matches" or "transferable_skills"
+        2. Verify that no skills from "missing_required_skills" are included
+        3. Ensure all experience descriptions can be traced to evidence in candidate qualifications
+        4. Confirm that transferable skills are presented honestly without exaggeration
+        """
+
+        prompt = f"Skills Mapping:\n{json.dumps(skills_mapping, indent=2)}\n\n"
+        prompt += f"Candidate Qualifications:\n{json.dumps(candidate_qualifications, indent=2)}\n\n"
+        prompt += f"Original Resume Header:\n{original_header}"
+        return system_prompt, prompt
+
+    async def generate_tailored_resume(self, message, skills_mapping: Dict, candidate_qualifications: Dict, original_header: str) -> AsyncGenerator[Message, None]:
+        """Generate a tailored resume based on skills mapping."""
+        try:
+            system_prompt, prompt = self.create_resume_generation_prompt(skills_mapping, candidate_qualifications, original_header)
+            async for message in self.call_llm(message, system_prompt, prompt, temperature=0.4):  # Lower temperature than the default for more consistent writing
+                if message.status != "done":
+                    yield message
+                if message.status == "error":
+                    return
+            message.status = "done"
+            yield message
+            return
+
+        except Exception as e:
+            message.status = "error"
+            message.response = f"Error in resume generation: {str(e)}"
+            logger.error(message.response)
+            logger.error(traceback.format_exc())
+            yield message
+            raise
+
+
+    # Stage 3: Verification Implementation
+    def create_verification_prompt(self, generated_resume: str, skills_mapping: Dict, candidate_qualifications: Dict) -> tuple[str, str]:
+        """Create the prompt for resume verification."""
+        system_prompt = """
+        You are a critical resume fact-checker responsible for verifying the accuracy of a tailored resume.
+        Your task is to identify and flag any fabricated or embellished information that does not appear in
+        the candidate's original materials.
+
+        ## INSTRUCTIONS:
+
+        1. Compare the tailored resume against:
+           - The structured skills mapping
+           - The candidate's original qualifications
+
+        2. Perform a line-by-line verification focusing on:
+           - Skills claimed vs. skills verified in original materials
+           - Experience descriptions vs. actual documented experience
+           - Projects and achievements vs. documented accomplishments
+           - Technical knowledge claims vs. verified technical background
+
+        3.
Create a verification report with these sections: + + ## OUTPUT FORMAT: + + ```json + { + "verification_results": { + "factual_accuracy": { + "status": "PASS/FAIL", + "issues": [ + { + "claim": "The specific claim in the resume", + "issue": "Why this is problematic", + "source_check": "Result of checking against source materials", + "suggested_correction": "How to fix this issue" + } + ] + }, + "skill_verification": { + "status": "PASS/FAIL", + "unverified_skills": ["skill1", "skill2"] + }, + "experience_verification": { + "status": "PASS/FAIL", + "problematic_statements": [ + { + "statement": "The problematic experience statement", + "issue": "Why this is problematic", + "suggested_correction": "How to fix this issue" + } + ] + }, + "overall_assessment": "APPROVED/NEEDS REVISION", + "correction_instructions": "Specific instructions for correcting the resume" + } + } + ``` + + ## CRITICAL VERIFICATION CRITERIA: + + 1. Any skill mentioned in the resume MUST appear verbatim in the skills mapping + 2. Any technology experience claimed MUST be explicitly documented in original materials + 3. Role descriptions must not imply expertise with technologies not listed in original materials + 4. "Transferable skills" must be reasonably transferable, not stretches or fabrications + 5. Job titles, dates, and companies must match exactly with original materials + 6. Professional summary must not imply experience with technologies from the job description that aren't in the candidate's background + + ## SPECIAL ATTENTION: + + Pay particular attention to subtle fabrications such as: + - Vague wording that implies experience ("worked with", "familiar with", "utilized") with technologies not in original materials + - Reframing unrelated experience to falsely imply relevance to the job requirements + - Adding technologies to project descriptions that weren't mentioned in the original materials + - Exaggerating level of involvement or responsibility in projects or roles + - Creating achievements that weren't documented in the original materials + """ + + prompt = f"Tailored Resume:\n{generated_resume}\n\n" + prompt += f"Skills Mapping:\n{json.dumps(skills_mapping, indent=2)}\n\n" + prompt += f"Candidate Qualifications:\n{json.dumps(candidate_qualifications, indent=2)}" + return system_prompt, prompt + + async def verify_resume(self, message: Message, generated_resume: str, skills_mapping: Dict, candidate_qualifications: Dict) -> AsyncGenerator[Message, None]: + """Verify the generated resume for accuracy against original materials.""" + try: + system_prompt, prompt = self.create_verification_prompt(generated_resume, skills_mapping, candidate_qualifications) + async for message in self.call_llm(message, system_prompt, prompt): + if message.status != "done": + yield message + + # Extract JSON from response + json_str = self.extract_json_from_text(message.response) + verification_results = json.loads(json_str) + + message.status = "done" + message.response = json_str + yield message + return + except Exception as e: + message.status = "error" + message.response = f"Error in resume verification: {str(e)}" + logger.error(message.response) + logger.error(traceback.format_exc()) + yield message + raise + + async def correct_resume_issues(self, message: Message, generated_resume: str, verification_results: Dict, skills_mapping: Dict, candidate_qualifications: Dict, original_header: str) -> AsyncGenerator[Message, None]: + """Correct issues in the resume based on verification results.""" + if 
verification_results["verification_results"]["overall_assessment"] == "APPROVED":
+            message.status = "done"
+            message.response = generated_resume
+            yield message
+            return
+
+        system_prompt = """
+        You are a professional resume editor with a focus on factual accuracy. Your task is to correct
+        the identified issues in a tailored resume according to the verification report.
+
+        ## INSTRUCTIONS:
+
+        1. Make ONLY the changes specified in the verification report
+        2. Ensure all corrections maintain factual accuracy based on the skills mapping
+        3. Do not introduce any new claims or skills not present in the verification data
+        4. Maintain the original format and structure of the resume
+
+        ## PROCESS:
+
+        1. For each issue in the verification report:
+           - Identify the problematic text in the resume
+           - Replace it with the suggested correction
+           - Ensure the correction is consistent with the rest of the resume
+
+        2. After making all corrections:
+           - Review the revised resume for consistency
+           - Ensure no factual inaccuracies have been introduced
+           - Check that all formatting remains professional
+
+        Return the fully corrected resume.
+        """
+
+        prompt = f"Original Resume:\n{generated_resume}\n\n"
+        prompt += f"Verification Results:\n{json.dumps(verification_results, indent=2)}\n\n"
+        prompt += f"Skills Mapping:\n{json.dumps(skills_mapping, indent=2)}\n\n"
+        prompt += f"Candidate Qualifications:\n{json.dumps(candidate_qualifications, indent=2)}\n\n"
+        prompt += f"Original Resume Header:\n{original_header}"
+
+        try:
+            async for message in self.call_llm(message, system_prompt, prompt, temperature=0.3):
+                if message.status != "done":
+                    yield message
+                if message.status == "error":
+                    return
+            yield message
+
+        except Exception as e:
+            message.status = "error"
+            message.response = f"Error in resume correction: {str(e)}"
+            logger.error(message.response)
+            logger.error(traceback.format_exc())
+            yield message
+            raise
+
+    async def generate_factual_tailored_resume(self, message: Message, job_description: str, resume: str, additional_context: str = "") -> AsyncGenerator[Message, None]:
+        """
+        Main function to generate a factually accurate tailored resume.
+ + Args: + job_description: The job description text + resume: The candidate's original resume text + additional_context: Any additional context about the candidate (optional) + + Returns: + Dict containing the generated resume and supporting analysis + """ + try: + message.status = "thinking" + message.response = "Starting multi-stage RAG resume generation process" + logger.info(message.response) + yield message + + # Stage 1A: Analyze job requirements + message.response = "Stage 1A: Analyzing job requirements" + logger.info(message.response) + yield message + + async for message in self.analyze_job_requirements(message, job_description): + if message.status != "done": + yield message + if message.status == "error": + return + + job_requirements = json.loads(message.response) + + # Stage 1B: Analyze candidate qualifications + message.status = "thinking" + message.response = "Stage 1B: Analyzing candidate qualifications" + logger.info(message.response) + yield message + + async for message in self.analyze_candidate_qualifications(message, resume, additional_context): + if message.status != "done": + yield message + if message.status == "error": + return + + candidate_qualifications = json.loads(message.response) + + # Stage 1C: Create skills mapping + message.status = "thinking" + message.response = "Stage 1C: Creating skills mapping" + logger.info(message.response) + yield message + + async for message in self.create_skills_mapping(message, job_requirements, candidate_qualifications): + if message.status != "done": + yield message + if message.status == "error": + return + + skills_mapping = json.loads(message.response) + + # Extract header from original resume + original_header = self.extract_header_from_resume(resume) + + # Stage 2: Generate tailored resume + message.status = "thinking" + message.response = "Stage 2: Generating tailored resume" + logger.info(message.response) + yield message + + async for message in self.generate_tailored_resume(message, skills_mapping, candidate_qualifications, original_header): + if message.status != "done": + yield message + if message.status == "error": + return + + generated_resume = message.response + + # Stage 3: Verify resume + message.status = "thinking" + message.response = "Stage 3: Verifying resume for accuracy" + logger.info(message.response) + yield message + + async for message in self.verify_resume(message, generated_resume, skills_mapping, candidate_qualifications): + if message.status != "done": + yield message + if message.status == "error": + return + + verification_results = json.loads(message.response) + + # Handle corrections if needed + if verification_results["verification_results"]["overall_assessment"] == "NEEDS REVISION": + message.status = "thinking" + message.response = "Correcting issues found in verification" + logger.info(message.response) + yield message + + async for message in self.correct_resume_issues( + message=message, + generated_resume=generated_resume, + verification_results=verification_results, + skills_mapping=skills_mapping, + candidate_qualifications=candidate_qualifications, + original_header=original_header + ): + if message.status != "done": + yield message + if message.status == "error": + return + + generated_resume = message.response + + # Re-verify after corrections + message.status = "thinking" + message.response = "Re-verifying corrected resume" + logger.info(message.response) + async for message in self.verify_resume( + message=message, + generated_resume=generated_resume, + 
skills_mapping=skills_mapping,
+                    candidate_qualifications=candidate_qualifications):
+                    if message.status != "done":
+                        yield message
+                    if message.status == "error":
+                        return
+
+            # Return the final results
+            message.status = "done"
+            message.response = json.dumps({
+                "job_requirements": job_requirements,
+                "candidate_qualifications": candidate_qualifications,
+                "skills_mapping": skills_mapping,
+                "generated_resume": generated_resume,
+                "verification_results": verification_results
+            })
+            yield message
+
+            logger.info("Resume generation process completed successfully")
+            return
+
+        except Exception as e:
+            message.status = "error"
+            message.response = f"Error in resume generation process: {str(e)}"
+            logger.error(message.response)
+            logger.error(traceback.format_exc())
+            yield message
+            raise
+
+    # Main orchestration function
+    async def generate_llm_response(self, llm: Any, model: str, message: Message, temperature: float = 0.7) -> AsyncGenerator[Message, None]:
+        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
+
+        self.llm = llm
+        self.model = model
+
+        self.metrics.generate_count.labels(agent=self.agent_type).inc()
+        with self.metrics.generate_duration.labels(agent=self.agent_type).time():
+            # Hard-coded sample inputs; self.job_description is not used here
+            job_description = "You write C and C++ code for 4 years."
+            resume = "I have worked on Cobol and QuickBasic for 18 years."
+            additional_context = ""
+
+            async for message in self.generate_factual_tailored_resume(message=message, job_description=job_description, resume=resume, additional_context=additional_context):
+                if message.status != "done":
+                    yield message
+
+            yield message
+            return
+
 # Register the base agent
 agent_registry.register(JobDescription._agent_type, JobDescription)