Skill assessment is now streaming

This commit is contained in:
James Ketr 2025-06-09 09:19:00 -07:00
parent 817a8e4b66
commit 781275e9a9
7 changed files with 351 additions and 195 deletions

View File

@ -22,16 +22,14 @@ import CheckCircleIcon from '@mui/icons-material/CheckCircle';
import ErrorIcon from '@mui/icons-material/Error'; import ErrorIcon from '@mui/icons-material/Error';
import PendingIcon from '@mui/icons-material/Pending'; import PendingIcon from '@mui/icons-material/Pending';
import WarningIcon from '@mui/icons-material/Warning'; import WarningIcon from '@mui/icons-material/Warning';
import { Candidate, ChatMessage, ChatMessageError, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, ChatSession, JobRequirements, SkillMatch } from 'types/types'; import { Candidate, ChatMessage, ChatMessageError, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, ChatSession, EvidenceDetail, JobRequirements, SkillAssessment, SkillStatus } from 'types/types';
import { useAuth } from 'hooks/AuthContext'; import { useAuth } from 'hooks/AuthContext';
import { BackstoryPageProps } from './BackstoryTab'; import { BackstoryPageProps } from './BackstoryTab';
import { toCamelCase } from 'types/conversion';
import { Job } from 'types/types'; import { Job } from 'types/types';
import { StyledMarkdown } from './StyledMarkdown'; import { StyledMarkdown } from './StyledMarkdown';
import { Scrollable } from './Scrollable'; import { Scrollable } from './Scrollable';
import { start } from 'repl';
import { TypesElement } from '@uiw/react-json-view';
import { useAppState } from 'hooks/GlobalContext'; import { useAppState } from 'hooks/GlobalContext';
import * as Types from 'types/types';
interface JobAnalysisProps extends BackstoryPageProps { interface JobAnalysisProps extends BackstoryPageProps {
job: Job; job: Job;
@ -42,6 +40,12 @@ const defaultMessage: ChatMessage = {
status: "done", type: "text", sessionId: "", timestamp: new Date(), content: "", role: "assistant", metadata: null as any status: "done", type: "text", sessionId: "", timestamp: new Date(), content: "", role: "assistant", metadata: null as any
}; };
// Local view-model for one requirement row in the analysis table.
// Extends the generated SkillAssessment with UI-only fields that are
// filled in client-side while streaming results arrive.
interface SkillMatch extends SkillAssessment {
domain: string; // requirement grouping (e.g. "Preferred Attributes")
status: SkillStatus; // per-row lifecycle: "waiting" | "pending" | "complete" | "error"
matchScore: number; // 0-100 score derived from evidenceStrength (see switch below)
}
const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) => { const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) => {
const { const {
job, job,
@ -60,6 +64,7 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
const [statusMessage, setStatusMessage] = useState<ChatMessage | null>(null); const [statusMessage, setStatusMessage] = useState<ChatMessage | null>(null);
const [startAnalysis, setStartAnalysis] = useState<boolean>(false); const [startAnalysis, setStartAnalysis] = useState<boolean>(false);
const [analyzing, setAnalyzing] = useState<boolean>(false); const [analyzing, setAnalyzing] = useState<boolean>(false);
const [matchStatus, setMatchStatus] = useState<string>('');
const isMobile = useMediaQuery(theme.breakpoints.down('sm')); const isMobile = useMediaQuery(theme.breakpoints.down('sm'));
@ -97,14 +102,17 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
job.requirements.preferredAttributes.forEach(req => requirements.push({ requirement: req, domain: 'Preferred Attributes' })); job.requirements.preferredAttributes.forEach(req => requirements.push({ requirement: req, domain: 'Preferred Attributes' }));
} }
const initialSkillMatches = requirements.map(req => ({ const initialSkillMatches: SkillMatch[] = requirements.map(req => ({
requirement: req.requirement, skill: req.requirement,
candidateId: candidate.id || "",
domain: req.domain, domain: req.domain,
status: 'waiting' as const, status: 'waiting' as const,
assessment: "",
description: "",
evidenceFound: false,
evidenceStrength: "none",
evidenceDetails: [],
matchScore: 0, matchScore: 0,
assessment: '',
description: '',
citations: []
})); }));
setRequirements(requirements); setRequirements(requirements);
@ -118,6 +126,13 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
initializeRequirements(job); initializeRequirements(job);
}, [job]); }, [job]);
// Streaming callbacks passed to apiClient.candidateMatchForRequirement.
// onStatus surfaces intermediate status text in the progress UI while a
// single requirement match is being generated.
const skillMatchHandlers = {
onStatus: (status: Types.ChatMessageStatus) => {
console.log('status:', status.content);
// Shown next to the LinearProgress message for the active requirement.
setMatchStatus(status.content);
},
};
// Fetch match data for each requirement // Fetch match data for each requirement
useEffect(() => { useEffect(() => {
if (!startAnalysis || analyzing || !job.requirements) { if (!startAnalysis || analyzing || !job.requirements) {
@ -136,10 +151,12 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
return updated; return updated;
}); });
const result: any = await apiClient.candidateMatchForRequirement(candidate.id || '', requirements[i].requirement); const request: any = await apiClient.candidateMatchForRequirement(candidate.id || '', requirements[i].requirement, skillMatchHandlers);
const skillMatch = result.skillMatch; const result = await request.promise;
const skillMatch = result.skillAssessment;
setMatchStatus('');
let matchScore: number = 0; let matchScore: number = 0;
switch (skillMatch.evidenceStrength) { switch (skillMatch.evidenceStrength.toUpperCase()) {
case "STRONG": matchScore = 100; break; case "STRONG": matchScore = 100; break;
case "MODERATE": matchScore = 75; break; case "MODERATE": matchScore = 75; break;
case "WEAK": matchScore = 50; break; case "WEAK": matchScore = 50; break;
@ -149,13 +166,10 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
matchScore = Math.min(skillMatch.citations.length * 8, 40); matchScore = Math.min(skillMatch.citations.length * 8, 40);
} }
const match: SkillMatch = { const match: SkillMatch = {
status: "complete", ...skillMatch,
status: 'complete',
matchScore, matchScore,
domain: requirements[i].domain, domain: requirements[i].domain,
requirement: skillMatch.skill,
assessment: skillMatch.assessment,
citations: skillMatch.evidenceDetails.map((evidence: any) => { return { source: evidence.source, text: evidence.quote, context: evidence.context } }),
description: skillMatch.description
}; };
setSkillMatches(prev => { setSkillMatches(prev => {
const updated = [...prev]; const updated = [...prev];
@ -357,7 +371,7 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
{getStatusIcon(match.status, match.matchScore)} {getStatusIcon(match.status, match.matchScore)}
<Box sx={{ display: "flex", flexDirection: "column", gap: 0, p: 0, m: 0 }}> <Box sx={{ display: "flex", flexDirection: "column", gap: 0, p: 0, m: 0 }}>
<Typography sx={{ ml: 1, mb: 0, fontWeight: 'medium', marginBottom: "0px !important" }}> <Typography sx={{ ml: 1, mb: 0, fontWeight: 'medium', marginBottom: "0px !important" }}>
{match.requirement} {match.skill}
</Typography> </Typography>
<Typography variant="caption" sx={{ ml: 1, fontWeight: 'light' }}> <Typography variant="caption" sx={{ ml: 1, fontWeight: 'light' }}>
{match.domain} {match.domain}
@ -402,7 +416,7 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
<Box sx={{ width: '100%', p: 2 }}> <Box sx={{ width: '100%', p: 2 }}>
<LinearProgress /> <LinearProgress />
<Typography sx={{ mt: 2 }}> <Typography sx={{ mt: 2 }}>
Analyzing candidate's match for this requirement... Analyzing candidate's match for this requirement... {matchStatus}
</Typography> </Typography>
</Box> </Box>
) : match.status === 'error' ? ( ) : match.status === 'error' ? (
@ -422,10 +436,10 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
<Typography variant="h6" gutterBottom> <Typography variant="h6" gutterBottom>
Supporting Evidence Supporting Evidence
</Typography> </Typography>
{match.citations && match.citations.length > 0 ? ( {match.evidenceDetails && match.evidenceDetails.length > 0 ? (
match.citations.map((citation, citIndex) => ( match.evidenceDetails.map((evidence, evndex) => (
<Card <Card
key={citIndex} key={evndex}
variant="outlined" variant="outlined"
sx={{ sx={{
mb: 2, mb: 2,
@ -435,14 +449,14 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
> >
<CardContent> <CardContent>
<Typography variant="body1" component="div" sx={{ mb: 1, fontStyle: 'italic' }}> <Typography variant="body1" component="div" sx={{ mb: 1, fontStyle: 'italic' }}>
"{citation.text}" "{evidence.quote}"
</Typography> </Typography>
<Box sx={{ display: 'flex', justifyContent: 'space-between', alignItems: 'flex-start', flexDirection: "column" }}> <Box sx={{ display: 'flex', justifyContent: 'space-between', alignItems: 'flex-start', flexDirection: "column" }}>
<Typography variant="body2" color="text.secondary"> <Typography variant="body2" color="text.secondary">
Relevance: {citation.context} Relevance: {evidence.context}
</Typography> </Typography>
<Typography variant="caption" color="text.secondary"> <Typography variant="caption" color="text.secondary">
Source: {citation.source} Source: {evidence.source}
</Typography> </Typography>
{/* <Chip {/* <Chip
size="small" size="small"

View File

@ -910,16 +910,15 @@ class ApiClient {
return this.streamify<Types.DocumentMessage>(`/jobs/requirements/${jobId}`, null, streamingOptions); return this.streamify<Types.DocumentMessage>(`/jobs/requirements/${jobId}`, null, streamingOptions);
} }
async candidateMatchForRequirement(candidate_id: string, requirement: string) : Promise<Types.SkillMatch> { candidateMatchForRequirement(candidate_id: string, requirement: string,
const response = await fetch(`${this.baseUrl}/candidates/${candidate_id}/skill-match`, { streamingOptions?: StreamingOptions<Types.ChatMessageSkillAssessment>)
method: 'POST', : StreamingResponse<Types.ChatMessageSkillAssessment> {
const body = JSON.stringify(requirement);
streamingOptions = {
...streamingOptions,
headers: this.defaultHeaders, headers: this.defaultHeaders,
body: JSON.stringify(requirement) };
}); return this.streamify<Types.ChatMessageSkillAssessment>(`/candidates/${candidate_id}/skill-match`, body, streamingOptions);
const result = await handleApiResponse<Types.SkillMatch>(response);
return result;
} }
async updateCandidateDocument(document: Types.Document) : Promise<Types.Document> { async updateCandidateDocument(document: Types.Document) : Promise<Types.Document> {

View File

@ -1,6 +1,6 @@
// Generated TypeScript types from Pydantic models // Generated TypeScript types from Pydantic models
// Source: src/backend/models.py // Source: src/backend/models.py
// Generated on: 2025-06-09T03:32:01.335483 // Generated on: 2025-06-09T15:19:57.985888
// DO NOT EDIT MANUALLY - This file is auto-generated // DO NOT EDIT MANUALLY - This file is auto-generated
// ============================ // ============================
@ -53,6 +53,8 @@ export type SkillLevel = "beginner" | "intermediate" | "advanced" | "expert";
export type SkillStatus = "pending" | "complete" | "waiting" | "error"; export type SkillStatus = "pending" | "complete" | "waiting" | "error";
export type SkillStrength = "strong" | "moderate" | "weak" | "none";
export type SocialPlatform = "linkedin" | "twitter" | "github" | "dribbble" | "behance" | "website" | "other"; export type SocialPlatform = "linkedin" | "twitter" | "github" | "dribbble" | "behance" | "website" | "other";
export type SortOrder = "asc" | "desc"; export type SortOrder = "asc" | "desc";
@ -339,6 +341,20 @@ export interface ChatMessageRagSearch {
content: Array<ChromaDBGetResponse>; content: Array<ChromaDBGetResponse>;
} }
/**
 * Chat message variant that carries a completed skill assessment payload.
 * Auto-generated from the backend Pydantic model of the same name.
 */
export interface ChatMessageSkillAssessment {
id?: string;
sessionId: string;
senderId?: string;
// Streaming lifecycle; "done" marks the final message carrying the assessment.
status: "streaming" | "status" | "done" | "error";
type: "binary" | "text" | "json";
timestamp?: Date;
role: "user" | "assistant" | "system" | "information" | "warning" | "error";
// Raw message body (the producing agent serializes the assessment as JSON here).
content: string;
tunables?: Tunables;
metadata: ChatMessageMetaData;
// Structured assessment result attached to the final message.
skillAssessment: SkillAssessment;
}
export interface ChatMessageStatus { export interface ChatMessageStatus {
id?: string; id?: string;
sessionId: string; sessionId: string;
@ -575,6 +591,12 @@ export interface ErrorDetail {
details?: any; details?: any;
} }
/**
 * One piece of supporting evidence backing a skill assessment,
 * quoted from a candidate document.
 */
export interface EvidenceDetail {
source: string; // document/location the quote was taken from
quote: string; // verbatim excerpt supporting the skill
context: string; // why the quote is relevant to the requirement
}
export interface GPUInfo { export interface GPUInfo {
name: string; name: string;
memory: number; memory: number;
@ -975,19 +997,15 @@ export interface Skill {
} }
export interface SkillAssessment { export interface SkillAssessment {
skillName: string; candidateId: string;
score: number; skill: string;
comments?: string; evidenceFound: boolean;
} evidenceStrength: "strong" | "moderate" | "weak" | "none";
export interface SkillMatch {
requirement: string;
domain: string;
status: "pending" | "complete" | "waiting" | "error";
matchScore: number;
assessment: string; assessment: string;
citations?: Array<Citation>;
description: string; description: string;
evidenceDetails?: Array<EvidenceDetail>;
createdAt?: Date;
updatedAt?: Date;
} }
export interface SocialLink { export interface SocialLink {
@ -1250,6 +1268,19 @@ export function convertChatMessageRagSearchFromApi(data: any): ChatMessageRagSea
timestamp: data.timestamp ? new Date(data.timestamp) : undefined, timestamp: data.timestamp ? new Date(data.timestamp) : undefined,
}; };
} }
/**
* Convert ChatMessageSkillAssessment from API response, parsing date fields
* Date fields: timestamp
*/
/**
 * Convert a ChatMessageSkillAssessment from an API response, reviving the
 * `timestamp` field from an ISO string into a Date. Falsy input (null/
 * undefined) is returned unchanged.
 */
export function convertChatMessageSkillAssessmentFromApi(data: any): ChatMessageSkillAssessment {
if (!data) return data;
const { timestamp, ...rest } = data;
return {
...rest,
// Revive the ISO timestamp; absent/falsy values map to undefined.
timestamp: timestamp ? new Date(timestamp) : undefined,
};
}
/** /**
* Convert ChatMessageStatus from API response, parsing date fields * Convert ChatMessageStatus from API response, parsing date fields
* Date fields: timestamp * Date fields: timestamp
@ -1551,6 +1582,21 @@ export function convertRefreshTokenFromApi(data: any): RefreshToken {
expiresAt: new Date(data.expiresAt), expiresAt: new Date(data.expiresAt),
}; };
} }
/**
* Convert SkillAssessment from API response, parsing date fields
* Date fields: createdAt, updatedAt
*/
/**
 * Convert a SkillAssessment from an API response, reviving `createdAt` and
 * `updatedAt` from ISO strings into Date objects. Falsy input (null/
 * undefined) is returned unchanged.
 */
export function convertSkillAssessmentFromApi(data: any): SkillAssessment {
if (!data) return data;
// Shared reviver: ISO string -> Date; absent/falsy -> undefined.
const toDate = (value: any): Date | undefined => (value ? new Date(value) : undefined);
return {
...data,
createdAt: toDate(data.createdAt),
updatedAt: toDate(data.updatedAt),
};
}
/** /**
* Convert UserActivity from API response, parsing date fields * Convert UserActivity from API response, parsing date fields
* Date fields: timestamp * Date fields: timestamp
@ -1614,6 +1660,8 @@ export function convertFromApi<T>(data: any, modelType: string): T {
return convertChatMessageErrorFromApi(data) as T; return convertChatMessageErrorFromApi(data) as T;
case 'ChatMessageRagSearch': case 'ChatMessageRagSearch':
return convertChatMessageRagSearchFromApi(data) as T; return convertChatMessageRagSearchFromApi(data) as T;
case 'ChatMessageSkillAssessment':
return convertChatMessageSkillAssessmentFromApi(data) as T;
case 'ChatMessageStatus': case 'ChatMessageStatus':
return convertChatMessageStatusFromApi(data) as T; return convertChatMessageStatusFromApi(data) as T;
case 'ChatMessageStreaming': case 'ChatMessageStreaming':
@ -1656,6 +1704,8 @@ export function convertFromApi<T>(data: any, modelType: string): T {
return convertRateLimitStatusFromApi(data) as T; return convertRateLimitStatusFromApi(data) as T;
case 'RefreshToken': case 'RefreshToken':
return convertRefreshTokenFromApi(data) as T; return convertRefreshTokenFromApi(data) as T;
case 'SkillAssessment':
return convertSkillAssessmentFromApi(data) as T;
case 'UserActivity': case 'UserActivity':
return convertUserActivityFromApi(data) as T; return convertUserActivityFromApi(data) as T;
case 'WorkExperience': case 'WorkExperience':

View File

@ -1,5 +1,5 @@
from __future__ import annotations from __future__ import annotations
from pydantic import model_validator, Field from pydantic import model_validator, Field # type: ignore
from typing import ( from typing import (
Dict, Dict,
Literal, Literal,
@ -16,16 +16,19 @@ import json
import asyncio import asyncio
import time import time
import asyncio import asyncio
import numpy as np import numpy as np # type: ignore
from .base import Agent, agent_registry, LLMMessage from .base import Agent, agent_registry, LLMMessage
from models import Candidate, ChatMessage, ChatMessageError, ChatMessageMetaData, ApiMessageType, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, ChatOptions, ChatSenderType, ApiStatusType, SkillMatch, Tunables from models import (Candidate, ChatMessage, ChatMessageError, ChatMessageMetaData, ApiMessageType,
ChatMessageSkillAssessment, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, ChatOptions,
ChatSenderType, ApiStatusType, EvidenceDetail, SkillAssessment, Tunables)
import model_cast import model_cast
from logger import logger from logger import logger
import defines import defines
import backstory_traceback as traceback
class SkillMatchAgent(Agent): class SkillMatchAgent(Agent):
agent_type: Literal["skill_match"] = "skill_match" agent_type: Literal["skill_match"] = "skill_match" # type: ignore
_agent_type: ClassVar[str] = agent_type # Add this for registration _agent_type: ClassVar[str] = agent_type # Add this for registration
def generate_skill_assessment_prompt(self, skill, rag_context): def generate_skill_assessment_prompt(self, skill, rag_context):
@ -141,6 +144,14 @@ JSON RESPONSE:"""
tunables: Optional[Tunables] = None, tunables: Optional[Tunables] = None,
temperature=0.7 temperature=0.7
) -> AsyncGenerator[ChatMessage | ChatMessageStatus | ChatMessageError | ChatMessageStreaming, None]: ) -> AsyncGenerator[ChatMessage | ChatMessageStatus | ChatMessageError | ChatMessageStreaming, None]:
if not self.user:
error_message = ChatMessageError(
session_id=session_id,
content="Agent not attached to user. Attach the agent to a user before generating responses."
)
logger.error(f"⚠️ {error_message.content}")
yield error_message
return
# Stage 1A: Analyze job requirements # Stage 1A: Analyze job requirements
rag_message = None rag_message = None
async for rag_message in self.generate_rag_results(session_id=session_id, prompt=prompt): async for rag_message in self.generate_rag_results(session_id=session_id, prompt=prompt):
@ -163,16 +174,16 @@ JSON RESPONSE:"""
rag_context = self.get_rag_context(rag_message) rag_context = self.get_rag_context(rag_message)
system_prompt, prompt = self.generate_skill_assessment_prompt(skill=prompt, rag_context=rag_context) system_prompt, prompt = self.generate_skill_assessment_prompt(skill=prompt, rag_context=rag_context)
skill_assessment = None skill_message = None
async for skill_assessment in self.llm_one_shot(llm=llm, model=model, session_id=session_id, prompt=prompt, system_prompt=system_prompt, temperature=0.7): async for skill_message in self.llm_one_shot(llm=llm, model=model, session_id=session_id, prompt=prompt, system_prompt=system_prompt, temperature=0.7):
if skill_assessment.status == ApiStatusType.ERROR: if skill_message.status == ApiStatusType.ERROR:
logger.error(f"⚠️ {skill_assessment.content}") logger.error(f"⚠️ {skill_message.content}")
yield skill_assessment yield skill_message
return return
if skill_assessment.status != ApiStatusType.DONE: if skill_message.status != ApiStatusType.DONE:
yield skill_assessment yield skill_message
if skill_assessment is None: if skill_message is None:
error_message = ChatMessageError( error_message = ChatMessageError(
session_id=session_id, session_id=session_id,
content="Skill assessment failed to generate a response." content="Skill assessment failed to generate a response."
@ -181,24 +192,42 @@ JSON RESPONSE:"""
yield error_message yield error_message
return return
json_str = self.extract_json_from_text(skill_assessment.content) json_str = self.extract_json_from_text(skill_message.content)
skill_assessment_data = "" skill_assessment_data = ""
skill_assessment = None
try: try:
skill_assessment_data = json.loads(json_str).get("skill_assessment", {}) skill_assessment_data = json.loads(json_str).get("skill_assessment", {})
skill_assessment = SkillAssessment(
candidate_id=self.user.id,
skill=skill_assessment_data.get("skill", ""),
evidence_found=skill_assessment_data.get("evidence_found", False),
evidence_strength=skill_assessment_data.get("evidence_strength", "NONE").lower(),
assessment=skill_assessment_data.get("assessment", ""),
description=skill_assessment_data.get("description", ""),
evidence_details=[
EvidenceDetail(
source=evidence.get("source", ""),
quote=evidence.get("quote", ""),
context=evidence.get("context", "")
) for evidence in skill_assessment_data.get("evidence_details", [])
]
)
except Exception as e: except Exception as e:
error_message = ChatMessageError( error_message = ChatMessageError(
session_id=session_id, session_id=session_id,
content=f"Failed to parse Skill assessment JSON: {str(e)}\n\n{skill_assessment.content}" content=f"Failed to parse Skill assessment JSON: {str(e)}\n\n{skill_message.content}"
) )
logger.error(traceback.format_exc())
logger.error(f"⚠️ {error_message.content}") logger.error(f"⚠️ {error_message.content}")
yield error_message yield error_message
return return
skill_assessment_message = ChatMessage( skill_assessment_message = ChatMessageSkillAssessment(
session_id=session_id, session_id=session_id,
status=ApiStatusType.DONE, status=ApiStatusType.DONE,
content=json.dumps(skill_assessment_data), content=json.dumps(skill_assessment_data),
metadata=skill_assessment.metadata metadata=skill_message.metadata,
skill_assessment=skill_assessment,
) )
yield skill_assessment_message yield skill_assessment_message
logger.info(f"✅ Skill assessment completed successfully.") logger.info(f"✅ Skill assessment completed successfully.")

View File

@ -7,7 +7,7 @@ from datetime import datetime, timezone, UTC, timedelta
import asyncio import asyncio
from models import ( from models import (
# User models # User models
Candidate, Employer, BaseUser, Guest, Authentication, AuthResponse, Candidate, Employer, BaseUser, EvidenceDetail, Guest, Authentication, AuthResponse, SkillAssessment,
) )
import backstory_traceback as traceback import backstory_traceback as traceback
@ -255,64 +255,69 @@ class RedisDatabase:
logger.error(f"Error deleting all documents for candidate {candidate_id}: {e}") logger.error(f"Error deleting all documents for candidate {candidate_id}: {e}")
raise raise
async def get_cached_skill_match(self, cache_key: str) -> Optional[Dict[str, Any]]: async def get_cached_skill_match(self, cache_key: str) -> Optional[SkillAssessment]:
"""Get cached skill match assessment""" """Get cached skill match assessment"""
try: try:
data = await self.redis.get(cache_key) json_str = await self.redis.get(cache_key)
if data: if json_str:
return json.loads(data) json_data = json.loads(json_str)
skill_assessment = SkillAssessment.model_validate(json_data)
return skill_assessment
return None return None
except Exception as e: except Exception as e:
logger.error(f"❌ Error getting cached skill match: {e}") logger.error(f"❌ Error getting cached skill match: {e}")
return None return None
async def cache_skill_match(self, cache_key: str, assessment_data: Dict[str, Any]) -> None: async def cache_skill_match(self, cache_key: str, assessment: SkillAssessment) -> None:
"""Cache skill match assessment""" """Cache skill match assessment"""
try: try:
# Cache for 1 hour by default # Cache for 1 hour by default
await self.redis.setex( await self.redis.setex(
cache_key, cache_key,
3600, 3600,
json.dumps(assessment_data) json.dumps(assessment.model_dump(mode='json', by_alias=True), default=str) # Serialize with datetime handling
) )
logger.debug(f"💾 Skill match cached: {cache_key}") logger.debug(f"💾 Skill match cached: {cache_key}")
except Exception as e: except Exception as e:
logger.error(f"❌ Error caching skill match: {e}") logger.error(f"❌ Error caching skill match: {e}")
async def get_candidate_skill_update_time(self, candidate_id: str) -> Optional[datetime]:
"""Get the last time candidate skills were updated"""
try:
candidate_data = await self.get_candidate(candidate_id)
if candidate_data:
updated_at_str = candidate_data.get("updated_at")
if updated_at_str:
return datetime.fromisoformat(updated_at_str.replace('Z', '+00:00'))
return None
except Exception as e:
logger.error(f"❌ Error getting candidate skill update time: {e}")
return None
async def get_user_rag_update_time(self, user_id: str) -> Optional[datetime]: async def get_user_rag_update_time(self, user_id: str) -> Optional[datetime]:
"""Get the last time user's RAG data was updated""" """Get the last time user's RAG data was updated (returns timezone-aware UTC)"""
try: try:
rag_update_key = f"user:{user_id}:rag_last_update" rag_update_key = f"user:{user_id}:rag_last_update"
timestamp_str = await self.redis.get(rag_update_key) timestamp_str = await self.redis.get(rag_update_key)
if timestamp_str: if timestamp_str:
return datetime.fromisoformat(timestamp_str.decode('utf-8')) dt = datetime.fromisoformat(timestamp_str)
# Ensure the datetime is timezone-aware (assume UTC if naive)
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
else:
# Convert to UTC if it's in a different timezone
dt = dt.astimezone(timezone.utc)
return dt
return None return None
except Exception as e: except Exception as e:
logger.error(f"❌ Error getting user RAG update time: {e}") logger.error(f"❌ Error getting user RAG update time: {e}")
return None return None
async def update_user_rag_timestamp(self, user_id: str) -> bool: async def update_user_rag_timestamp(self, user_id: str) -> bool:
"""Update the RAG data timestamp for a specific user (call this when user's RAG data is updated)""" """Set the user's RAG data update time (stores as UTC ISO format)"""
try: try:
update_time = datetime.now(timezone.utc)
# Ensure we're storing UTC timezone-aware format
if update_time.tzinfo is None:
update_time = update_time.replace(tzinfo=timezone.utc)
else:
update_time = update_time.astimezone(timezone.utc)
rag_update_key = f"user:{user_id}:rag_last_update" rag_update_key = f"user:{user_id}:rag_last_update"
current_time = datetime.utcnow().isoformat() # Store as ISO format with timezone info
await self.redis.set(rag_update_key, current_time) timestamp_str = update_time.isoformat() # This includes timezone
await self.redis.set(rag_update_key, timestamp_str)
return True return True
except Exception as e: except Exception as e:
logger.error(f"Error updating RAG timestamp for user {user_id}: {e}") logger.error(f"❌ Error setting user RAG update time: {e}")
return False return False
async def invalidate_candidate_skill_cache(self, candidate_id: str) -> int: async def invalidate_candidate_skill_cache(self, candidate_id: str) -> int:

View File

@ -75,7 +75,7 @@ import agents
# ============================= # =============================
from models import ( from models import (
# API # API
MOCK_UUID, ApiActivityType, ChatMessageError, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, DocumentMessage, DocumentOptions, Job, JobRequirementsMessage, LoginRequest, CreateCandidateRequest, CreateEmployerRequest, MOCK_UUID, ApiActivityType, ChatMessageError, ChatMessageSkillAssessment, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, DocumentMessage, DocumentOptions, Job, JobRequirementsMessage, LoginRequest, CreateCandidateRequest, CreateEmployerRequest,
# User models # User models
Candidate, Employer, BaseUserWithType, BaseUser, Guest, Authentication, AuthResponse, CandidateAI, Candidate, Employer, BaseUserWithType, BaseUser, Guest, Authentication, AuthResponse, CandidateAI,
@ -91,7 +91,7 @@ from models import (
Document, DocumentType, DocumentListResponse, DocumentUpdateRequest, DocumentContentResponse, Document, DocumentType, DocumentListResponse, DocumentUpdateRequest, DocumentContentResponse,
# Supporting models # Supporting models
Location, MFARequest, MFAData, MFARequestResponse, MFAVerifyRequest, RagContentMetadata, RagContentResponse, ResendVerificationRequest, Skill, SystemInfo, WorkExperience, Education, Location, MFARequest, MFAData, MFARequestResponse, MFAVerifyRequest, RagContentMetadata, RagContentResponse, ResendVerificationRequest, Skill, SkillAssessment, SystemInfo, WorkExperience, Education,
# Email # Email
EmailVerificationRequest EmailVerificationRequest
@ -4501,16 +4501,18 @@ async def get_candidate_skill_match(
requirement: str = Body(...), requirement: str = Body(...),
current_user = Depends(get_current_user_or_guest), current_user = Depends(get_current_user_or_guest),
database: RedisDatabase = Depends(get_database) database: RedisDatabase = Depends(get_database)
): ) -> StreamingResponse:
"""Get skill match for a candidate against a requirement with caching""" """Get skill match for a candidate against a requirement with caching"""
try: async def message_stream_generator():
# Find candidate by ID
candidate_data = await database.get_candidate(candidate_id) candidate_data = await database.get_candidate(candidate_id)
if not candidate_data: if not candidate_data:
return JSONResponse( error_message = ChatMessageError(
status_code=404, sessionId=MOCK_UUID, # No session ID for document uploads
content=create_error_response("CANDIDATE_NOT_FOUND", f"Candidate with ID '{candidate_id}' not found") content=f"Candidate with ID '{candidate_id}' not found"
) )
yield error_message
return
candidate = Candidate.model_validate(candidate_data) candidate = Candidate.model_validate(candidate_data)
@ -4518,93 +4520,140 @@ async def get_candidate_skill_match(
cache_key = f"skill_match:{candidate_id}:{hash(requirement)}" cache_key = f"skill_match:{candidate_id}:{hash(requirement)}"
# Get cached assessment if it exists # Get cached assessment if it exists
cached_assessment = await database.get_cached_skill_match(cache_key) assessment : SkillAssessment | None = await database.get_cached_skill_match(cache_key)
# Get the last update time for the candidate's skill information
candidate_skill_update_time = await database.get_candidate_skill_update_time(candidate_id)
# Get the latest RAG data update time for the current user # Get the latest RAG data update time for the current user
user_rag_update_time = await database.get_user_rag_update_time(current_user.id) user_rag_update_time = await database.get_user_rag_update_time(current_user.id)
# Determine if we need to regenerate the assessment # Determine if we need to regenerate the assessment
should_regenerate = True
cached_date = None cached_date = None
if assessment:
updated = assessment.updated_at if "updated_at" in assessment else assessment.created_at
# Check if cached result is still valid
# Regenerate if user's RAG data was updated after cache date
if user_rag_update_time and user_rag_update_time >= updated:
logger.info(f"🔄 Out-of-date cached entry for {candidate.username} skill {assessment.skill}")
assessment = None
else:
cached_date = updated
if cached_assessment: if assessment:
cached_date = cached_assessment.get('cached_at') logger.info(f"✅ Found cached skill match for candidate {candidate.username} against requirement: {requirement}")
if cached_date: logger.info(f"💾 Cached skill match data: {assessment.evidence_strength}")
# Check if cached result is still valid
# Regenerate if:
# 1. Candidate skills were updated after cache date
# 2. User's RAG data was updated after cache date
if (not candidate_skill_update_time or cached_date >= candidate_skill_update_time) and \
(not user_rag_update_time or cached_date >= user_rag_update_time):
should_regenerate = False
logger.info(f"🔄 Using cached skill match for candidate {candidate.id}")
if should_regenerate: # Return cached assessment
logger.info(f"🔍 Generating new skill match for candidate {candidate.id} against requirement: {requirement}") skill_message = ChatMessageSkillAssessment(
sessionId=MOCK_UUID, # No session ID for document uploads
content=f"Cached skill match found for {candidate.username}",
skill_assessment=assessment
)
yield skill_message
return
async with entities.get_candidate_entity(candidate=candidate) as candidate_entity: logger.info(f"🔍 Generating skill match for candidate {candidate.username} against requirement: {requirement}")
agent = candidate_entity.get_or_create_agent(agent_type=ChatContextType.SKILL_MATCH)
if not agent: async with entities.get_candidate_entity(candidate=candidate) as candidate_entity:
return JSONResponse( agent = candidate_entity.get_or_create_agent(agent_type=ChatContextType.SKILL_MATCH)
status_code=400, if not agent:
content=create_error_response("AGENT_NOT_FOUND", "No skill match agent found for this candidate") error_message = ChatMessageError(
sessionId=MOCK_UUID, # No session ID for document uploads
content=f"No skill match agent found for this candidate"
) )
yield error_message
return
# Generate new skill match # Generate new skill match
skill_match = await get_last_item( final_message = None
agent.generate( async for generated_message in agent.generate(
llm=llm_manager.get_llm(), llm=llm_manager.get_llm(),
model=defines.model, model=defines.model,
session_id=MOCK_UUID, session_id=MOCK_UUID,
prompt=requirement, prompt=requirement,
), ):
) if generated_message.status == ApiStatusType.ERROR:
logger.error(f"❌ AI generation error: {generated_message.content}")
yield f"data: {json.dumps({'status': 'error'})}\n\n"
return
if skill_match is None: # If the message is not done, convert it to a ChatMessageBase to remove
return JSONResponse( # metadata and other unnecessary fields for streaming
status_code=500, if generated_message.status != ApiStatusType.DONE:
content=create_error_response("NO_MATCH", "No skill match found for the given requirement") if not isinstance(generated_message, ChatMessageStreaming) and not isinstance(generated_message, ChatMessageStatus):
raise TypeError(
f"Expected ChatMessageStreaming or ChatMessageStatus, got {type(generated_message)}"
)
yield generated_message# Convert to ChatMessageBase for streaming
# Store reference to the complete AI message
if generated_message.status == ApiStatusType.DONE:
final_message = generated_message
break
if final_message is None:
error_message = ChatMessageError(
sessionId=MOCK_UUID, # No session ID for document uploads
content=f"No skill match found for the given requirement"
) )
yield error_message
return
skill_match_data = json.loads(skill_match.content) if not isinstance(final_message, ChatMessageSkillAssessment):
error_message = ChatMessageError(
sessionId=MOCK_UUID, # No session ID for document uploads
content=f"Skill match response is not valid"
)
yield error_message
return
# Cache the new assessment with current timestamp skill_match : ChatMessageSkillAssessment = final_message
cached_assessment = { assessment = skill_match.skill_assessment
"skill_match": skill_match_data, if not assessment:
"cached_at": datetime.utcnow().isoformat(), error_message = ChatMessageError(
"candidate_id": candidate_id, sessionId=MOCK_UUID, # No session ID for document uploads
"requirement": requirement content=f"Skill assessment could not be generated"
} )
yield error_message
return
await database.cache_skill_match(cache_key, cached_assessment) await database.cache_skill_match(cache_key, assessment)
logger.info(f"💾 Cached new skill match for candidate {candidate.id}") logger.info(f"💾 Cached new skill match for candidate {candidate.id}")
logger.info(f"✅ Skill match found for candidate {candidate.id}: {skill_match_data['evidence_strength']}") logger.info(f"✅ Skill match found for candidate {candidate.id}: {assessment.evidence_strength}")
else: yield skill_match
# Use cached result - we know cached_assessment is not None here return
if cached_assessment is None:
return JSONResponse(
status_code=500,
content=create_error_response("CACHE_ERROR", "Unexpected cache state")
)
skill_match_data = cached_assessment["skill_match"]
logger.info(f"✅ Retrieved cached skill match for candidate {candidate.id}: {skill_match_data['evidence_strength']}")
return create_success_response({ try:
"candidateId": candidate.id, async def to_json(method):
"skillMatch": skill_match_data, try:
"cached": not should_regenerate, async for message in method:
"cacheTimestamp": cached_date json_data = message.model_dump(mode='json', by_alias=True)
}) json_str = json.dumps(json_data)
yield f"data: {json_str}\n\n".encode("utf-8")
except Exception as e:
logger.error(backstory_traceback.format_exc())
logger.error(f"Error in to_json conversion: {e}")
return
return StreamingResponse(
to_json(message_stream_generator()),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache, no-store, must-revalidate",
"Connection": "keep-alive",
"X-Accel-Buffering": "no", # Nginx
"X-Content-Type-Options": "nosniff",
"Access-Control-Allow-Origin": "*", # Adjust for your CORS needs
"Transfer-Encoding": "chunked",
},
)
except Exception as e: except Exception as e:
logger.error(backstory_traceback.format_exc()) logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Get candidate skill match error: {e}") logger.error(f"❌ Document upload error: {e}")
return JSONResponse( return StreamingResponse(
status_code=500, iter([json.dumps(ChatMessageError(
content=create_error_response("SKILL_MATCH_ERROR", str(e)) sessionId=MOCK_UUID, # No session ID for document uploads
content="Failed to generate skill assessment"
).model_dump(mode='json', by_alias=True))]),
media_type="text/event-stream"
) )
@rate_limited(guest_per_minute=5, user_per_minute=30, admin_per_minute=100) @rate_limited(guest_per_minute=5, user_per_minute=30, admin_per_minute=100)

View File

@ -88,28 +88,38 @@ class Requirements(BaseModel):
raise ValueError("Requirements must be a dictionary with 'required' and 'preferred' keys.") raise ValueError("Requirements must be a dictionary with 'required' and 'preferred' keys.")
return values return values
class Citation(BaseModel):
text: str
source: str
context: str
relevance: int # 0-100 scale
class SkillStatus(str, Enum): class SkillStatus(str, Enum):
PENDING = "pending" PENDING = "pending"
COMPLETE = "complete" COMPLETE = "complete"
WAITING = "waiting" WAITING = "waiting"
ERROR = "error" ERROR = "error"
class SkillMatch(BaseModel): class SkillStrength(str, Enum):
requirement: str STRONG = "strong"
domain: str MODERATE = "moderate"
status: SkillStatus WEAK = "weak"
match_score: int = Field(..., alias='matchScore') NONE = "none"
assessment: str
citations: List[Citation] = Field(default_factory=list) class EvidenceDetail(BaseModel):
description: str source: str = Field(..., alias="source", description="The source of the evidence (e.g., resume section, position, project)")
quote: str = Field(..., alias="quote", description="Exact text from the resume or other source showing evidence")
context: str = Field(..., alias="context", description="Brief explanation of how this demonstrates the skill")
model_config = { model_config = {
"populate_by_name": True # Allow both field names and aliases "populate_by_name": True, # Allow both field names and aliases
}
class SkillAssessment(BaseModel):
candidate_id: str = Field(..., alias='candidateId')
skill: str = Field(..., alias="skill", description="The skill being assessed")
evidence_found: bool = Field(..., alias="evidenceFound", description="Whether evidence was found for the skill")
evidence_strength: SkillStrength = Field(..., alias="evidenceStrength", description="Strength of evidence found for the skill")
assessment: str = Field(..., alias="assessment", description="Short (one to two sentence) assessment of the candidate's proficiency with the skill")
description: str = Field(..., alias="description", description="Short (two to three sentence) description of what the skill is, independent of whether the candidate has that skill or not")
evidence_details: List[EvidenceDetail] = Field(default_factory=list, alias="evidenceDetails", description="List of evidence details supporting the skill assessment")
created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias='createdAt')
updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias='updatedAt')
model_config = {
"populate_by_name": True, # Allow both field names and aliases
} }
class ApiMessageType(str, Enum): class ApiMessageType(str, Enum):
@ -424,11 +434,6 @@ class ApplicationDecision(BaseModel):
date: datetime date: datetime
by: str by: str
class SkillAssessment(BaseModel):
skill_name: str = Field(..., alias="skillName")
score: Annotated[float, Field(ge=0, le=10)]
comments: Optional[str] = None
class NotificationPreference(BaseModel): class NotificationPreference(BaseModel):
type: NotificationType type: NotificationType
events: List[str] events: List[str]
@ -997,6 +1002,11 @@ class ChatMessage(ChatMessageUser):
#is_edited: bool = Field(False, alias="isEdited") #is_edited: bool = Field(False, alias="isEdited")
#edit_history: Optional[List[EditHistory]] = Field(None, alias="editHistory") #edit_history: Optional[List[EditHistory]] = Field(None, alias="editHistory")
class ChatMessageSkillAssessment(ChatMessageUser):
role: ChatSenderType = ChatSenderType.ASSISTANT
metadata: ChatMessageMetaData = Field(default=ChatMessageMetaData())
skill_assessment: SkillAssessment = Field(..., alias="skillAssessment")
class GPUInfo(BaseModel): class GPUInfo(BaseModel):
name: str name: str
memory: int memory: int