Skill tracking almost working

This commit is contained in:
parent 7586725f11
commit d9a0267cfa

src/backend/agents/job_requirements.py  (new file, 170 lines)
@@ -0,0 +1,170 @@
from __future__ import annotations

from pydantic import model_validator, Field  # type: ignore
from typing import (
    Dict,
    Literal,
    ClassVar,
    Any,
    AsyncGenerator,
    List,
    Optional,
    # override
)  # NOTE: You must import Optional for late binding to work
import inspect
import re
import json
import traceback
import asyncio
import time
import numpy as np  # type: ignore

from .base import Agent, agent_registry, LLMMessage
from models import Candidate, ChatMessage, ChatMessageBase, ChatMessageMetaData, ChatMessageType, ChatMessageUser, ChatOptions, ChatSenderType, ChatStatusType, JobRequirements
import model_cast
from logger import logger
import defines

class JobRequirementsAgent(Agent):
    agent_type: Literal["job_requirements"] = "job_requirements"  # type: ignore
    _agent_type: ClassVar[str] = agent_type  # Add this for registration

    # Stage 1A: Job Analysis Implementation
    def create_job_analysis_prompt(self, job_description: str) -> tuple[str, str]:
        """Create the prompt for job requirements analysis."""
        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
        system_prompt = """
You are an objective job requirements analyzer. Your task is to extract and categorize the specific skills,
experiences, and qualifications required in a job description WITHOUT any reference to any candidate.

## INSTRUCTIONS:

1. Analyze ONLY the job description provided.
2. Extract and categorize all requirements and preferences.
3. DO NOT consider any candidate information - this is a pure job analysis task.

## OUTPUT FORMAT:

```json
{
  "job_requirements": {
    "technical_skills": {
      "required": ["skill1", "skill2"],
      "preferred": ["skill1", "skill2"]
    },
    "experience_requirements": {
      "required": ["exp1", "exp2"],
      "preferred": ["exp1", "exp2"]
    },
    "education_requirements": ["req1", "req2"],
    "soft_skills": ["skill1", "skill2"],
    "industry_knowledge": ["knowledge1", "knowledge2"],
    "responsibilities": ["resp1", "resp2"],
    "company_values": ["value1", "value2"]
  }
}
```

Be specific and detailed in your extraction. Break down compound requirements into individual components.
For example, "5+ years experience with React, Node.js and MongoDB" should be separated into:
- Experience: "5+ years software development"
- Technical skills: "React", "Node.js", "MongoDB"

Avoid vague categorizations and be precise about whether skills are explicitly required or just preferred.
"""

        prompt = f"Job Description:\n{job_description}"
        return system_prompt, prompt

    async def analyze_job_requirements(
        self, llm: Any, model: str, user_message: ChatMessage
    ) -> AsyncGenerator[ChatMessage, None]:
        """Analyze job requirements from job description."""
        system_prompt, prompt = self.create_job_analysis_prompt(user_message.content)
        analyze_message = user_message.model_copy()
        analyze_message.content = prompt

        generated_message = None
        async for generated_message in self.llm_one_shot(
            llm, model, system_prompt=system_prompt, user_message=analyze_message
        ):
            if generated_message.status == ChatStatusType.ERROR:
                generated_message.content = "Error analyzing job requirements."
                yield generated_message
                return

        if not generated_message:
            status_message = ChatMessage(
                session_id=user_message.session_id,
                sender=ChatSenderType.AGENT,
                status=ChatStatusType.ERROR,
                type=ChatMessageType.ERROR,
                content="Job requirements analysis failed to generate a response.",
            )
            yield status_message
            return

        generated_message.status = ChatStatusType.DONE
        generated_message.type = ChatMessageType.RESPONSE
        yield generated_message
        return

    async def generate(
        self, llm: Any, model: str, user_message: ChatMessageUser, user: Candidate | None, temperature=0.7
    ) -> AsyncGenerator[ChatMessage, None]:
        # Stage 1A: Analyze job requirements
        status_message = ChatMessage(
            session_id=user_message.session_id,
            sender=ChatSenderType.AGENT,
            status=ChatStatusType.STATUS,
            type=ChatMessageType.THINKING,
            content="Analyzing job requirements",
        )
        yield status_message

        generated_message = None
        async for generated_message in self.analyze_job_requirements(llm, model, user_message):
            if generated_message.status == ChatStatusType.ERROR:
                status_message.status = ChatStatusType.ERROR
                status_message.content = generated_message.content
                yield status_message
                return
        if not generated_message:
            status_message.status = ChatStatusType.ERROR
            status_message.content = "Job requirements analysis failed."
            yield status_message
            return

        json_str = self.extract_json_from_text(generated_message.content)
        job_requirements: JobRequirements | None = None
        job_requirements_data = ""
        try:
            job_requirements_data = json.loads(json_str)
            job_requirements_data = job_requirements_data.get("job_requirements", None)
            job_requirements = JobRequirements.model_validate(job_requirements_data)
            if not job_requirements:
                raise ValueError("Job requirements data is empty or invalid.")
        except json.JSONDecodeError as e:
            status_message.status = ChatStatusType.ERROR
            status_message.content = f"Failed to parse job requirements JSON: {str(e)}\n\n{job_requirements_data}"
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return
        except ValueError as e:
            status_message.status = ChatStatusType.ERROR
            status_message.content = f"Job requirements validation error: {str(e)}\n\n{job_requirements_data}"
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return
        except Exception as e:
            status_message.status = ChatStatusType.ERROR
            status_message.content = f"Unexpected error processing job requirements: {str(e)}\n\n{job_requirements_data}"
            logger.error(traceback.format_exc())
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return

        status_message.status = ChatStatusType.DONE
        status_message.type = ChatMessageType.RESPONSE
        status_message.content = json.dumps(job_requirements.model_dump(mode="json", exclude_unset=True))
        yield status_message

        logger.info("✅ Job requirements analysis completed successfully.")
        return

# Register the base agent
agent_registry.register(JobRequirementsAgent._agent_type, JobRequirementsAgent)
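For context on how this new agent is meant to be exercised, here is a minimal driver sketch. It is not part of the commit: the default construction of the agent, the `ChatMessageUser(session_id=..., content=...)` keyword usage, and the `llm` client handle are assumptions inferred from the code above.

```python
# Hypothetical driver for JobRequirementsAgent -- a sketch, not code from this commit.
# Assumptions: the agent constructs with defaults, ChatMessageUser accepts
# session_id/content keywords, and `llm` is whatever client llm_one_shot expects.
from __future__ import annotations

from agents.job_requirements import JobRequirementsAgent
from models import ChatMessageUser, ChatStatusType


async def run_job_analysis(llm, model: str, job_description: str) -> str | None:
    """Stream the agent and return the final JobRequirements JSON, or None on error."""
    agent = JobRequirementsAgent()
    request = ChatMessageUser(session_id="demo-session", content=job_description)

    final_content: str | None = None
    async for message in agent.generate(llm=llm, model=model, user_message=request, user=None):
        if message.status == ChatStatusType.ERROR:
            return None
        if message.status == ChatStatusType.DONE:
            # The DONE message content is json.dumps() of the validated JobRequirements model.
            final_content = message.content
    return final_content
```

The DONE message carries the serialized `JobRequirements` model, so a caller can `json.loads()` it directly.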
src/backend/agents/skill_match.py  (new file, 233 lines)
@@ -0,0 +1,233 @@
from __future__ import annotations

from pydantic import model_validator, Field  # type: ignore
from typing import (
    Dict,
    Literal,
    ClassVar,
    Any,
    AsyncGenerator,
    List,
    Optional,
    # override
)  # NOTE: You must import Optional for late binding to work
import inspect
import re
import json
import traceback
import asyncio
import time
import numpy as np  # type: ignore

from .base import Agent, agent_registry, LLMMessage
from models import Candidate, ChatMessage, ChatMessageBase, ChatMessageMetaData, ChatMessageType, ChatMessageUser, ChatOptions, ChatSenderType, ChatStatusType, SkillMatch
import model_cast
from logger import logger
import defines

class SkillMatchAgent(Agent):
    agent_type: Literal["skill_match"] = "skill_match"  # type: ignore
    _agent_type: ClassVar[str] = agent_type  # Add this for registration

    def generate_skill_assessment_prompt(self, skill: str, rag_content: str) -> tuple[str, str]:
        """
        Generate the system and user prompts used to query the LLM for evidence of a specific skill.

        Parameters:
        - skill (str): The specific skill to assess from job requirements
        - rag_content (str): Additional RAG content queried from candidate documents

        Returns:
        - tuple[str, str]: The system prompt and user prompt tailored to assess the specific skill
        """

        system_prompt = f"""You are an objective skill assessor. Your task is to determine if a candidate possesses
a SPECIFIC skill based solely on their resume and supporting evidence.

## SKILL TO ASSESS:
"{skill}"

## INSTRUCTIONS:
1. Focus exclusively on assessing the candidate's proficiency with the skill: "{skill}".
2. Examine the resume and supporting documents for both explicit mentions and clearly demonstrated applications of the skill.
3. Do NOT infer the skill unless it is either:
   - Directly mentioned in association with experience, or
   - Clearly evidenced through relevant tools, technologies, responsibilities, or outcomes, or
   - Referenced as being used or listed as part of a broader initiative.
4. Evaluate each piece of evidence and assign a confidence rating:
   - STRONG: Explicit use with clear context or repeated/prolonged experience
   - MODERATE: Inferred through tools, environments, or outcomes (e.g., Python used in a listed project/tool)
   - WEAK: Mentioned in a list or indirectly implied without context
   - NONE: No relevant evidence
5. Provide up to 10 evidence_details. Each should include:
   - source: where the evidence appears
   - quote: an exact snippet from the document(s)
   - context: a short rationale explaining how this supports the skill claim
6. When no supporting evidence exists, output a "NONE" rating with an empty evidence_details list.
7. IMPORTANT: Even if the skill is only listed in a technologies or languages section, consider this valid evidence.

## OUTPUT FORMAT:
```json
{{
  "skill_assessment": {{
    "skill": "{skill}",
    "evidence_found": true/false,
    "evidence_strength": "STRONG/MODERATE/WEAK/NONE",
    "description": "short (two to three sentence) description of what {skill} means with a concise example of what you're looking for",
    "evidence_details": [
      {{
        "source": "resume section/position/project",
        "quote": "exact text from resume showing evidence",
        "context": "brief explanation of how this demonstrates the skill"
      }}
    ]
  }}
}}
```

IMPORTANT: Be factual and precise. If you cannot find strong evidence for this specific skill, it is better to indicate "evidence_strength": "WEAK" than to stretch for connections.
Focus only on "{skill}" and not similar skills unless they directly demonstrate the required skill.
Remember that a skill listed in a "Languages" or "Technologies" section should be considered valid evidence.

Adhere strictly to the JSON output format requested. Do not include any additional text or commentary outside the JSON structure.
"""

        prompt = f"""Analyze the candidate information below for evidence of "{skill}".

RESPOND WITH ONLY VALID JSON USING THE EXACT FORMAT SPECIFIED.

<candidate_info>
{rag_content}
</candidate_info>

JSON RESPONSE:"""

        return system_prompt, prompt

    async def analyze_job_requirements(
        self, llm: Any, model: str, user_message: ChatMessage
    ) -> AsyncGenerator[ChatMessage, None]:
        """Analyze job requirements from job description.

        NOTE: Carried over from JobRequirementsAgent. This class does not define
        create_job_analysis_prompt, and generate() below does not call this method.
        """
        system_prompt, prompt = self.create_job_analysis_prompt(user_message.content)
        analyze_message = user_message.model_copy()
        analyze_message.content = prompt

        generated_message = None
        async for generated_message in self.llm_one_shot(
            llm, model, system_prompt=system_prompt, user_message=analyze_message
        ):
            if generated_message.status == ChatStatusType.ERROR:
                generated_message.content = "Error analyzing job requirements."
                yield generated_message
                return

        if not generated_message:
            status_message = ChatMessage(
                session_id=user_message.session_id,
                sender=ChatSenderType.AGENT,
                status=ChatStatusType.ERROR,
                type=ChatMessageType.ERROR,
                content="Job requirements analysis failed to generate a response.",
            )
            yield status_message
            return

        generated_message.status = ChatStatusType.DONE
        generated_message.type = ChatMessageType.RESPONSE
        yield generated_message
        return

    async def generate(
        self, llm: Any, model: str, user_message: ChatMessageUser, user: Candidate | None, temperature=0.7
    ) -> AsyncGenerator[ChatMessage, None]:
        # Assess the requested skill against RAG-retrieved candidate evidence
        status_message = ChatMessage(
            session_id=user_message.session_id,
            sender=ChatSenderType.AGENT,
            status=ChatStatusType.STATUS,
            type=ChatMessageType.THINKING,
            content="Analyzing skill match",
        )
        yield status_message

        rag_message = None
        async for rag_message in self.generate_rag_results(chat_message=user_message):
            if rag_message.status == ChatStatusType.ERROR:
                status_message.status = ChatStatusType.ERROR
                status_message.content = rag_message.content
                logger.error(f"⚠️ {status_message.content}")
                yield status_message
                return

        if rag_message is None:
            status_message.status = ChatStatusType.ERROR
            status_message.content = "Failed to retrieve RAG context."
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return

        logger.info(f"🔍 RAG content retrieved: {len(rag_message.content)} bytes")

        system_prompt, prompt = self.generate_skill_assessment_prompt(
            skill=user_message.content, rag_content=rag_message.content
        )

        user_message.content = prompt
        skill_assessment = None
        async for skill_assessment in self.llm_one_shot(
            llm=llm, model=model, user_message=user_message, system_prompt=system_prompt, temperature=0.1
        ):
            if skill_assessment.status == ChatStatusType.ERROR:
                status_message.status = ChatStatusType.ERROR
                status_message.content = skill_assessment.content
                logger.error(f"⚠️ {status_message.content}")
                yield status_message
                return
        if skill_assessment is None:
            status_message.status = ChatStatusType.ERROR
            status_message.content = "Failed to generate skill assessment."
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return

        json_str = self.extract_json_from_text(skill_assessment.content)
        skill_match = json_str  # TODO: validate into a SkillMatch model instead of passing raw JSON through
        skill_assessment_data = ""
        try:
            skill_assessment_data = json.loads(json_str)
            match_level = (
                skill_assessment_data
                .get("skill_assessment", {})
                .get("evidence_strength", "UNKNOWN")
            )
            skill_description = (
                skill_assessment_data
                .get("skill_assessment", {})
                .get("description", "")
            )
        except json.JSONDecodeError as e:
            status_message.status = ChatStatusType.ERROR
            status_message.content = f"Failed to parse skill assessment JSON: {str(e)}\n\n{skill_assessment_data}"
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return
        except ValueError as e:
            status_message.status = ChatStatusType.ERROR
            status_message.content = f"Skill assessment validation error: {str(e)}\n\n{skill_assessment_data}"
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return
        except Exception as e:
            status_message.status = ChatStatusType.ERROR
            status_message.content = f"Unexpected error processing skill assessment: {str(e)}\n\n{skill_assessment_data}"
            logger.error(traceback.format_exc())
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return

        if skill_match is None:
            status_message.status = ChatStatusType.ERROR
            status_message.content = "Skill assessment analysis failed to produce valid data."
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return

        status_message.status = ChatStatusType.DONE
        status_message.type = ChatMessageType.RESPONSE
        status_message.content = skill_match
        yield status_message

        logger.info("✅ Skill assessment completed successfully.")
        return

# Register the base agent
agent_registry.register(SkillMatchAgent._agent_type, SkillMatchAgent)
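Taken together, the two files sketch a two-stage pipeline: JobRequirementsAgent extracts the required skills from a job description, and SkillMatchAgent scores each skill against RAG-retrieved candidate evidence. The orchestration below is a hypothetical sketch of that flow, not code from this commit; the default construction of the agents, the `ChatMessageUser` keyword usage, and the shape of the serialized `JobRequirements` dump are assumptions.

```python
# Hypothetical orchestration of the two agents added in this commit -- a sketch only.
# Assumptions: agents construct with defaults, ChatMessageUser takes session_id/content,
# and the JobRequirements dump exposes technical_skills.required as in the prompt format.
from __future__ import annotations

import json

from agents.job_requirements import JobRequirementsAgent
from agents.skill_match import SkillMatchAgent
from models import ChatMessageUser, ChatStatusType


async def match_skills(llm, model: str, session_id: str, job_description: str) -> dict[str, str]:
    """Return {skill: evidence_strength} for each required technical skill."""
    requirements_json: str | None = None
    job_agent = JobRequirementsAgent()
    async for msg in job_agent.generate(
        llm=llm, model=model,
        user_message=ChatMessageUser(session_id=session_id, content=job_description),
        user=None,
    ):
        if msg.status == ChatStatusType.DONE:
            requirements_json = msg.content
    if not requirements_json:
        return {}

    required = json.loads(requirements_json).get("technical_skills", {}).get("required", [])

    results: dict[str, str] = {}
    skill_agent = SkillMatchAgent()
    for skill in required:
        async for msg in skill_agent.generate(
            llm=llm, model=model,
            user_message=ChatMessageUser(session_id=session_id, content=skill),
            user=None,
        ):
            if msg.status == ChatStatusType.DONE:
                # SkillMatchAgent's DONE message carries the raw skill_assessment JSON.
                assessment = json.loads(msg.content).get("skill_assessment", {})
                results[skill] = assessment.get("evidence_strength", "UNKNOWN")
    return results
```

Errors surface as ERROR-status messages from either agent; in that case the skill simply gets no entry in the result map.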