Skill assessment is now streaming

This commit is contained in:
James Ketr 2025-06-09 09:19:00 -07:00
parent 817a8e4b66
commit 781275e9a9
7 changed files with 351 additions and 195 deletions

View File

@ -22,16 +22,14 @@ import CheckCircleIcon from '@mui/icons-material/CheckCircle';
import ErrorIcon from '@mui/icons-material/Error';
import PendingIcon from '@mui/icons-material/Pending';
import WarningIcon from '@mui/icons-material/Warning';
import { Candidate, ChatMessage, ChatMessageError, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, ChatSession, JobRequirements, SkillMatch } from 'types/types';
import { Candidate, ChatMessage, ChatMessageError, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, ChatSession, EvidenceDetail, JobRequirements, SkillAssessment, SkillStatus } from 'types/types';
import { useAuth } from 'hooks/AuthContext';
import { BackstoryPageProps } from './BackstoryTab';
import { toCamelCase } from 'types/conversion';
import { Job } from 'types/types';
import { StyledMarkdown } from './StyledMarkdown';
import { Scrollable } from './Scrollable';
import { start } from 'repl';
import { TypesElement } from '@uiw/react-json-view';
import { useAppState } from 'hooks/GlobalContext';
import * as Types from 'types/types';
interface JobAnalysisProps extends BackstoryPageProps {
job: Job;
@ -42,6 +40,12 @@ const defaultMessage: ChatMessage = {
status: "done", type: "text", sessionId: "", timestamp: new Date(), content: "", role: "assistant", metadata: null as any
};
// UI-side view model: the generated SkillAssessment plus the extra fields the
// analysis accordion needs while per-requirement results stream in.
interface SkillMatch extends SkillAssessment {
  // Requirement group the skill came from (e.g. "Preferred Attributes").
  domain: string;
  // Streaming lifecycle state ("waiting" | "pending" | "complete" | "error").
  status: SkillStatus;
  // Derived 0-100 score computed from evidenceStrength / citation count.
  matchScore: number;
}
const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) => {
const {
job,
@ -60,6 +64,7 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
const [statusMessage, setStatusMessage] = useState<ChatMessage | null>(null);
const [startAnalysis, setStartAnalysis] = useState<boolean>(false);
const [analyzing, setAnalyzing] = useState<boolean>(false);
const [matchStatus, setMatchStatus] = useState<string>('');
const isMobile = useMediaQuery(theme.breakpoints.down('sm'));
@ -97,14 +102,17 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
job.requirements.preferredAttributes.forEach(req => requirements.push({ requirement: req, domain: 'Preferred Attributes' }));
}
const initialSkillMatches = requirements.map(req => ({
requirement: req.requirement,
const initialSkillMatches: SkillMatch[] = requirements.map(req => ({
skill: req.requirement,
candidateId: candidate.id || "",
domain: req.domain,
status: 'waiting' as const,
assessment: "",
description: "",
evidenceFound: false,
evidenceStrength: "none",
evidenceDetails: [],
matchScore: 0,
assessment: '',
description: '',
citations: []
}));
setRequirements(requirements);
@ -118,6 +126,13 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
initializeRequirements(job);
}, [job]);
const skillMatchHandlers = {
onStatus: (status: Types.ChatMessageStatus) => {
console.log('status:', status.content);
setMatchStatus(status.content);
},
};
// Fetch match data for each requirement
useEffect(() => {
if (!startAnalysis || analyzing || !job.requirements) {
@ -136,10 +151,12 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
return updated;
});
const result: any = await apiClient.candidateMatchForRequirement(candidate.id || '', requirements[i].requirement);
const skillMatch = result.skillMatch;
const request: any = await apiClient.candidateMatchForRequirement(candidate.id || '', requirements[i].requirement, skillMatchHandlers);
const result = await request.promise;
const skillMatch = result.skillAssessment;
setMatchStatus('');
let matchScore: number = 0;
switch (skillMatch.evidenceStrength) {
switch (skillMatch.evidenceStrength.toUpperCase()) {
case "STRONG": matchScore = 100; break;
case "MODERATE": matchScore = 75; break;
case "WEAK": matchScore = 50; break;
@ -149,13 +166,10 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
matchScore = Math.min(skillMatch.citations.length * 8, 40);
}
const match: SkillMatch = {
status: "complete",
...skillMatch,
status: 'complete',
matchScore,
domain: requirements[i].domain,
requirement: skillMatch.skill,
assessment: skillMatch.assessment,
citations: skillMatch.evidenceDetails.map((evidence: any) => { return { source: evidence.source, text: evidence.quote, context: evidence.context } }),
description: skillMatch.description
};
setSkillMatches(prev => {
const updated = [...prev];
@ -357,7 +371,7 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
{getStatusIcon(match.status, match.matchScore)}
<Box sx={{ display: "flex", flexDirection: "column", gap: 0, p: 0, m: 0 }}>
<Typography sx={{ ml: 1, mb: 0, fontWeight: 'medium', marginBottom: "0px !important" }}>
{match.requirement}
{match.skill}
</Typography>
<Typography variant="caption" sx={{ ml: 1, fontWeight: 'light' }}>
{match.domain}
@ -402,7 +416,7 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
<Box sx={{ width: '100%', p: 2 }}>
<LinearProgress />
<Typography sx={{ mt: 2 }}>
Analyzing candidate's match for this requirement...
Analyzing candidate's match for this requirement... {matchStatus}
</Typography>
</Box>
) : match.status === 'error' ? (
@ -422,10 +436,10 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
<Typography variant="h6" gutterBottom>
Supporting Evidence
</Typography>
{match.citations && match.citations.length > 0 ? (
match.citations.map((citation, citIndex) => (
{match.evidenceDetails && match.evidenceDetails.length > 0 ? (
match.evidenceDetails.map((evidence, evndex) => (
<Card
key={citIndex}
key={evndex}
variant="outlined"
sx={{
mb: 2,
@ -435,14 +449,14 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) =
>
<CardContent>
<Typography variant="body1" component="div" sx={{ mb: 1, fontStyle: 'italic' }}>
"{citation.text}"
"{evidence.quote}"
</Typography>
<Box sx={{ display: 'flex', justifyContent: 'space-between', alignItems: 'flex-start', flexDirection: "column" }}>
<Typography variant="body2" color="text.secondary">
Relevance: {citation.context}
Relevance: {evidence.context}
</Typography>
<Typography variant="caption" color="text.secondary">
Source: {citation.source}
Source: {evidence.source}
</Typography>
{/* <Chip
size="small"

View File

@ -910,16 +910,15 @@ class ApiClient {
return this.streamify<Types.DocumentMessage>(`/jobs/requirements/${jobId}`, null, streamingOptions);
}
async candidateMatchForRequirement(candidate_id: string, requirement: string) : Promise<Types.SkillMatch> {
const response = await fetch(`${this.baseUrl}/candidates/${candidate_id}/skill-match`, {
method: 'POST',
candidateMatchForRequirement(candidate_id: string, requirement: string,
streamingOptions?: StreamingOptions<Types.ChatMessageSkillAssessment>)
: StreamingResponse<Types.ChatMessageSkillAssessment> {
const body = JSON.stringify(requirement);
streamingOptions = {
...streamingOptions,
headers: this.defaultHeaders,
body: JSON.stringify(requirement)
});
const result = await handleApiResponse<Types.SkillMatch>(response);
return result;
};
return this.streamify<Types.ChatMessageSkillAssessment>(`/candidates/${candidate_id}/skill-match`, body, streamingOptions);
}
async updateCandidateDocument(document: Types.Document) : Promise<Types.Document> {

View File

@ -1,6 +1,6 @@
// Generated TypeScript types from Pydantic models
// Source: src/backend/models.py
// Generated on: 2025-06-09T03:32:01.335483
// Generated on: 2025-06-09T15:19:57.985888
// DO NOT EDIT MANUALLY - This file is auto-generated
// ============================
@ -53,6 +53,8 @@ export type SkillLevel = "beginner" | "intermediate" | "advanced" | "expert";
export type SkillStatus = "pending" | "complete" | "waiting" | "error";
export type SkillStrength = "strong" | "moderate" | "weak" | "none";
export type SocialPlatform = "linkedin" | "twitter" | "github" | "dribbble" | "behance" | "website" | "other";
export type SortOrder = "asc" | "desc";
@ -339,6 +341,20 @@ export interface ChatMessageRagSearch {
content: Array<ChromaDBGetResponse>;
}
// Chat message variant carrying a completed, structured skill assessment
// produced by the skill-match agent.
export interface ChatMessageSkillAssessment {
  id?: string;
  sessionId: string;
  senderId?: string;
  // Message lifecycle; "done" indicates the assessment payload is final.
  status: "streaming" | "status" | "done" | "error";
  type: "binary" | "text" | "json";
  timestamp?: Date;
  role: "user" | "assistant" | "system" | "information" | "warning" | "error";
  content: string;
  tunables?: Tunables;
  metadata: ChatMessageMetaData;
  // Structured assessment result attached to this message.
  skillAssessment: SkillAssessment;
}
export interface ChatMessageStatus {
id?: string;
sessionId: string;
@ -575,6 +591,12 @@ export interface ErrorDetail {
details?: any;
}
// One piece of supporting evidence backing a skill assessment.
export interface EvidenceDetail {
  // Where the evidence came from (e.g. resume section, position, project).
  source: string;
  // Exact text quoted from the source material.
  quote: string;
  // Brief explanation of how the quote demonstrates the skill.
  context: string;
}
export interface GPUInfo {
name: string;
memory: number;
@ -975,19 +997,15 @@ export interface Skill {
}
export interface SkillAssessment {
skillName: string;
score: number;
comments?: string;
}
export interface SkillMatch {
requirement: string;
domain: string;
status: "pending" | "complete" | "waiting" | "error";
matchScore: number;
candidateId: string;
skill: string;
evidenceFound: boolean;
evidenceStrength: "strong" | "moderate" | "weak" | "none";
assessment: string;
citations?: Array<Citation>;
description: string;
evidenceDetails?: Array<EvidenceDetail>;
createdAt?: Date;
updatedAt?: Date;
}
export interface SocialLink {
@ -1250,6 +1268,19 @@ export function convertChatMessageRagSearchFromApi(data: any): ChatMessageRagSea
timestamp: data.timestamp ? new Date(data.timestamp) : undefined,
};
}
/**
 * Convert a ChatMessageSkillAssessment received from the API, re-hydrating
 * its date fields.
 * Date fields: timestamp
 */
export function convertChatMessageSkillAssessmentFromApi(data: any): ChatMessageSkillAssessment {
  // Null/undefined inputs are passed through unchanged so callers can chain safely.
  if (!data) {
    return data;
  }
  // Only the timestamp needs parsing; absent values stay undefined.
  const timestamp = data.timestamp ? new Date(data.timestamp) : undefined;
  return { ...data, timestamp };
}
/**
* Convert ChatMessageStatus from API response, parsing date fields
* Date fields: timestamp
@ -1551,6 +1582,21 @@ export function convertRefreshTokenFromApi(data: any): RefreshToken {
expiresAt: new Date(data.expiresAt),
};
}
/**
 * Convert a SkillAssessment received from the API, re-hydrating its
 * date fields.
 * Date fields: createdAt, updatedAt
 */
export function convertSkillAssessmentFromApi(data: any): SkillAssessment {
  // Null/undefined inputs are passed through unchanged so callers can chain safely.
  if (!data) {
    return data;
  }
  // ISO strings become Date objects; missing/empty values stay undefined.
  const toDate = (value: any): Date | undefined => (value ? new Date(value) : undefined);
  return {
    ...data,
    createdAt: toDate(data.createdAt),
    updatedAt: toDate(data.updatedAt),
  };
}
/**
* Convert UserActivity from API response, parsing date fields
* Date fields: timestamp
@ -1614,6 +1660,8 @@ export function convertFromApi<T>(data: any, modelType: string): T {
return convertChatMessageErrorFromApi(data) as T;
case 'ChatMessageRagSearch':
return convertChatMessageRagSearchFromApi(data) as T;
case 'ChatMessageSkillAssessment':
return convertChatMessageSkillAssessmentFromApi(data) as T;
case 'ChatMessageStatus':
return convertChatMessageStatusFromApi(data) as T;
case 'ChatMessageStreaming':
@ -1656,6 +1704,8 @@ export function convertFromApi<T>(data: any, modelType: string): T {
return convertRateLimitStatusFromApi(data) as T;
case 'RefreshToken':
return convertRefreshTokenFromApi(data) as T;
case 'SkillAssessment':
return convertSkillAssessmentFromApi(data) as T;
case 'UserActivity':
return convertUserActivityFromApi(data) as T;
case 'WorkExperience':

View File

@ -1,5 +1,5 @@
from __future__ import annotations
from pydantic import model_validator, Field
from pydantic import model_validator, Field # type: ignore
from typing import (
Dict,
Literal,
@ -16,16 +16,19 @@ import json
import asyncio
import time
import asyncio
import numpy as np
import numpy as np # type: ignore
from .base import Agent, agent_registry, LLMMessage
from models import Candidate, ChatMessage, ChatMessageError, ChatMessageMetaData, ApiMessageType, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, ChatOptions, ChatSenderType, ApiStatusType, SkillMatch, Tunables
from models import (Candidate, ChatMessage, ChatMessageError, ChatMessageMetaData, ApiMessageType,
ChatMessageSkillAssessment, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, ChatOptions,
ChatSenderType, ApiStatusType, EvidenceDetail, SkillAssessment, Tunables)
import model_cast
from logger import logger
import defines
import backstory_traceback as traceback
class SkillMatchAgent(Agent):
agent_type: Literal["skill_match"] = "skill_match"
agent_type: Literal["skill_match"] = "skill_match" # type: ignore
_agent_type: ClassVar[str] = agent_type # Add this for registration
def generate_skill_assessment_prompt(self, skill, rag_context):
@ -141,6 +144,14 @@ JSON RESPONSE:"""
tunables: Optional[Tunables] = None,
temperature=0.7
) -> AsyncGenerator[ChatMessage | ChatMessageStatus | ChatMessageError | ChatMessageStreaming, None]:
if not self.user:
error_message = ChatMessageError(
session_id=session_id,
content="Agent not attached to user. Attach the agent to a user before generating responses."
)
logger.error(f"⚠️ {error_message.content}")
yield error_message
return
# Stage 1A: Analyze job requirements
rag_message = None
async for rag_message in self.generate_rag_results(session_id=session_id, prompt=prompt):
@ -163,16 +174,16 @@ JSON RESPONSE:"""
rag_context = self.get_rag_context(rag_message)
system_prompt, prompt = self.generate_skill_assessment_prompt(skill=prompt, rag_context=rag_context)
skill_assessment = None
async for skill_assessment in self.llm_one_shot(llm=llm, model=model, session_id=session_id, prompt=prompt, system_prompt=system_prompt, temperature=0.7):
if skill_assessment.status == ApiStatusType.ERROR:
logger.error(f"⚠️ {skill_assessment.content}")
yield skill_assessment
skill_message = None
async for skill_message in self.llm_one_shot(llm=llm, model=model, session_id=session_id, prompt=prompt, system_prompt=system_prompt, temperature=0.7):
if skill_message.status == ApiStatusType.ERROR:
logger.error(f"⚠️ {skill_message.content}")
yield skill_message
return
if skill_assessment.status != ApiStatusType.DONE:
yield skill_assessment
if skill_message.status != ApiStatusType.DONE:
yield skill_message
if skill_assessment is None:
if skill_message is None:
error_message = ChatMessageError(
session_id=session_id,
content="Skill assessment failed to generate a response."
@ -181,24 +192,42 @@ JSON RESPONSE:"""
yield error_message
return
json_str = self.extract_json_from_text(skill_assessment.content)
json_str = self.extract_json_from_text(skill_message.content)
skill_assessment_data = ""
skill_assessment = None
try:
skill_assessment_data = json.loads(json_str).get("skill_assessment", {})
skill_assessment = SkillAssessment(
candidate_id=self.user.id,
skill=skill_assessment_data.get("skill", ""),
evidence_found=skill_assessment_data.get("evidence_found", False),
evidence_strength=skill_assessment_data.get("evidence_strength", "NONE").lower(),
assessment=skill_assessment_data.get("assessment", ""),
description=skill_assessment_data.get("description", ""),
evidence_details=[
EvidenceDetail(
source=evidence.get("source", ""),
quote=evidence.get("quote", ""),
context=evidence.get("context", "")
) for evidence in skill_assessment_data.get("evidence_details", [])
]
)
except Exception as e:
error_message = ChatMessageError(
session_id=session_id,
content=f"Failed to parse Skill assessment JSON: {str(e)}\n\n{skill_assessment.content}"
content=f"Failed to parse Skill assessment JSON: {str(e)}\n\n{skill_message.content}"
)
logger.error(traceback.format_exc())
logger.error(f"⚠️ {error_message.content}")
yield error_message
return
skill_assessment_message = ChatMessage(
skill_assessment_message = ChatMessageSkillAssessment(
session_id=session_id,
status=ApiStatusType.DONE,
content=json.dumps(skill_assessment_data),
metadata=skill_assessment.metadata
metadata=skill_message.metadata,
skill_assessment=skill_assessment,
)
yield skill_assessment_message
logger.info(f"✅ Skill assessment completed successfully.")

View File

@ -7,7 +7,7 @@ from datetime import datetime, timezone, UTC, timedelta
import asyncio
from models import (
# User models
Candidate, Employer, BaseUser, Guest, Authentication, AuthResponse,
Candidate, Employer, BaseUser, EvidenceDetail, Guest, Authentication, AuthResponse, SkillAssessment,
)
import backstory_traceback as traceback
@ -255,64 +255,69 @@ class RedisDatabase:
logger.error(f"Error deleting all documents for candidate {candidate_id}: {e}")
raise
async def get_cached_skill_match(self, cache_key: str) -> Optional[Dict[str, Any]]:
async def get_cached_skill_match(self, cache_key: str) -> Optional[SkillAssessment]:
"""Get cached skill match assessment"""
try:
data = await self.redis.get(cache_key)
if data:
return json.loads(data)
json_str = await self.redis.get(cache_key)
if json_str:
json_data = json.loads(json_str)
skill_assessment = SkillAssessment.model_validate(json_data)
return skill_assessment
return None
except Exception as e:
logger.error(f"❌ Error getting cached skill match: {e}")
return None
async def cache_skill_match(self, cache_key: str, assessment_data: Dict[str, Any]) -> None:
async def cache_skill_match(self, cache_key: str, assessment: SkillAssessment) -> None:
"""Cache skill match assessment"""
try:
# Cache for 1 hour by default
await self.redis.setex(
cache_key,
3600,
json.dumps(assessment_data)
json.dumps(assessment.model_dump(mode='json', by_alias=True), default=str) # Serialize with datetime handling
)
logger.debug(f"💾 Skill match cached: {cache_key}")
except Exception as e:
logger.error(f"❌ Error caching skill match: {e}")
async def get_candidate_skill_update_time(self, candidate_id: str) -> Optional[datetime]:
    """Get the last time candidate skills were updated.

    Reads the candidate record's "updated_at" ISO-8601 string and parses it,
    normalizing a trailing 'Z' to '+00:00' so fromisoformat accepts it.

    Returns:
        The parsed datetime, or None when the candidate does not exist,
        has no "updated_at" value, or any error occurs (logged, not raised).
    """
    try:
        candidate_data = await self.get_candidate(candidate_id)
        if candidate_data:
            updated_at_str = candidate_data.get("updated_at")
            if updated_at_str:
                # 'Z' suffix is not accepted by datetime.fromisoformat on
                # older Pythons; rewrite it as an explicit UTC offset.
                return datetime.fromisoformat(updated_at_str.replace('Z', '+00:00'))
        return None
    except Exception as e:
        logger.error(f"❌ Error getting candidate skill update time: {e}")
        return None
async def get_user_rag_update_time(self, user_id: str) -> Optional[datetime]:
"""Get the last time user's RAG data was updated"""
"""Get the last time user's RAG data was updated (returns timezone-aware UTC)"""
try:
rag_update_key = f"user:{user_id}:rag_last_update"
timestamp_str = await self.redis.get(rag_update_key)
if timestamp_str:
return datetime.fromisoformat(timestamp_str.decode('utf-8'))
dt = datetime.fromisoformat(timestamp_str)
# Ensure the datetime is timezone-aware (assume UTC if naive)
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
else:
# Convert to UTC if it's in a different timezone
dt = dt.astimezone(timezone.utc)
return dt
return None
except Exception as e:
logger.error(f"❌ Error getting user RAG update time: {e}")
return None
return None
async def update_user_rag_timestamp(self, user_id: str) -> bool:
"""Update the RAG data timestamp for a specific user (call this when user's RAG data is updated)"""
"""Set the user's RAG data update time (stores as UTC ISO format)"""
try:
update_time = datetime.now(timezone.utc)
# Ensure we're storing UTC timezone-aware format
if update_time.tzinfo is None:
update_time = update_time.replace(tzinfo=timezone.utc)
else:
update_time = update_time.astimezone(timezone.utc)
rag_update_key = f"user:{user_id}:rag_last_update"
current_time = datetime.utcnow().isoformat()
await self.redis.set(rag_update_key, current_time)
# Store as ISO format with timezone info
timestamp_str = update_time.isoformat() # This includes timezone
await self.redis.set(rag_update_key, timestamp_str)
return True
except Exception as e:
logger.error(f"Error updating RAG timestamp for user {user_id}: {e}")
logger.error(f"❌ Error setting user RAG update time: {e}")
return False
async def invalidate_candidate_skill_cache(self, candidate_id: str) -> int:

View File

@ -75,7 +75,7 @@ import agents
# =============================
from models import (
# API
MOCK_UUID, ApiActivityType, ChatMessageError, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, DocumentMessage, DocumentOptions, Job, JobRequirementsMessage, LoginRequest, CreateCandidateRequest, CreateEmployerRequest,
MOCK_UUID, ApiActivityType, ChatMessageError, ChatMessageSkillAssessment, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, DocumentMessage, DocumentOptions, Job, JobRequirementsMessage, LoginRequest, CreateCandidateRequest, CreateEmployerRequest,
# User models
Candidate, Employer, BaseUserWithType, BaseUser, Guest, Authentication, AuthResponse, CandidateAI,
@ -91,7 +91,7 @@ from models import (
Document, DocumentType, DocumentListResponse, DocumentUpdateRequest, DocumentContentResponse,
# Supporting models
Location, MFARequest, MFAData, MFARequestResponse, MFAVerifyRequest, RagContentMetadata, RagContentResponse, ResendVerificationRequest, Skill, SystemInfo, WorkExperience, Education,
Location, MFARequest, MFAData, MFARequestResponse, MFAVerifyRequest, RagContentMetadata, RagContentResponse, ResendVerificationRequest, Skill, SkillAssessment, SystemInfo, WorkExperience, Education,
# Email
EmailVerificationRequest
@ -4501,16 +4501,18 @@ async def get_candidate_skill_match(
requirement: str = Body(...),
current_user = Depends(get_current_user_or_guest),
database: RedisDatabase = Depends(get_database)
):
) -> StreamingResponse:
"""Get skill match for a candidate against a requirement with caching"""
try:
# Find candidate by ID
async def message_stream_generator():
candidate_data = await database.get_candidate(candidate_id)
if not candidate_data:
return JSONResponse(
status_code=404,
content=create_error_response("CANDIDATE_NOT_FOUND", f"Candidate with ID '{candidate_id}' not found")
error_message = ChatMessageError(
sessionId=MOCK_UUID, # No session ID for document uploads
content=f"Candidate with ID '{candidate_id}' not found"
)
yield error_message
return
candidate = Candidate.model_validate(candidate_data)
@ -4518,94 +4520,141 @@ async def get_candidate_skill_match(
cache_key = f"skill_match:{candidate_id}:{hash(requirement)}"
# Get cached assessment if it exists
cached_assessment = await database.get_cached_skill_match(cache_key)
# Get the last update time for the candidate's skill information
candidate_skill_update_time = await database.get_candidate_skill_update_time(candidate_id)
assessment : SkillAssessment | None = await database.get_cached_skill_match(cache_key)
# Get the latest RAG data update time for the current user
user_rag_update_time = await database.get_user_rag_update_time(current_user.id)
# Determine if we need to regenerate the assessment
should_regenerate = True
cached_date = None
if cached_assessment:
cached_date = cached_assessment.get('cached_at')
if cached_date:
# Check if cached result is still valid
# Regenerate if:
# 1. Candidate skills were updated after cache date
# 2. User's RAG data was updated after cache date
if (not candidate_skill_update_time or cached_date >= candidate_skill_update_time) and \
(not user_rag_update_time or cached_date >= user_rag_update_time):
should_regenerate = False
logger.info(f"🔄 Using cached skill match for candidate {candidate.id}")
if should_regenerate:
logger.info(f"🔍 Generating new skill match for candidate {candidate.id} against requirement: {requirement}")
async with entities.get_candidate_entity(candidate=candidate) as candidate_entity:
agent = candidate_entity.get_or_create_agent(agent_type=ChatContextType.SKILL_MATCH)
if not agent:
return JSONResponse(
status_code=400,
content=create_error_response("AGENT_NOT_FOUND", "No skill match agent found for this candidate")
)
# Generate new skill match
skill_match = await get_last_item(
agent.generate(
llm=llm_manager.get_llm(),
model=defines.model,
session_id=MOCK_UUID,
prompt=requirement,
),
)
if skill_match is None:
return JSONResponse(
status_code=500,
content=create_error_response("NO_MATCH", "No skill match found for the given requirement")
)
skill_match_data = json.loads(skill_match.content)
# Cache the new assessment with current timestamp
cached_assessment = {
"skill_match": skill_match_data,
"cached_at": datetime.utcnow().isoformat(),
"candidate_id": candidate_id,
"requirement": requirement
}
await database.cache_skill_match(cache_key, cached_assessment)
logger.info(f"💾 Cached new skill match for candidate {candidate.id}")
logger.info(f"✅ Skill match found for candidate {candidate.id}: {skill_match_data['evidence_strength']}")
else:
# Use cached result - we know cached_assessment is not None here
if cached_assessment is None:
return JSONResponse(
status_code=500,
content=create_error_response("CACHE_ERROR", "Unexpected cache state")
)
skill_match_data = cached_assessment["skill_match"]
logger.info(f"✅ Retrieved cached skill match for candidate {candidate.id}: {skill_match_data['evidence_strength']}")
if assessment:
updated = assessment.updated_at if "updated_at" in assessment else assessment.created_at
# Check if cached result is still valid
# Regenerate if user's RAG data was updated after cache date
if user_rag_update_time and user_rag_update_time >= updated:
logger.info(f"🔄 Out-of-date cached entry for {candidate.username} skill {assessment.skill}")
assessment = None
else:
cached_date = updated
return create_success_response({
"candidateId": candidate.id,
"skillMatch": skill_match_data,
"cached": not should_regenerate,
"cacheTimestamp": cached_date
})
if assessment:
logger.info(f"✅ Found cached skill match for candidate {candidate.username} against requirement: {requirement}")
logger.info(f"💾 Cached skill match data: {assessment.evidence_strength}")
# Return cached assessment
skill_message = ChatMessageSkillAssessment(
sessionId=MOCK_UUID, # No session ID for document uploads
content=f"Cached skill match found for {candidate.username}",
skill_assessment=assessment
)
yield skill_message
return
logger.info(f"🔍 Generating skill match for candidate {candidate.username} against requirement: {requirement}")
async with entities.get_candidate_entity(candidate=candidate) as candidate_entity:
agent = candidate_entity.get_or_create_agent(agent_type=ChatContextType.SKILL_MATCH)
if not agent:
error_message = ChatMessageError(
sessionId=MOCK_UUID, # No session ID for document uploads
content=f"No skill match agent found for this candidate"
)
yield error_message
return
# Generate new skill match
final_message = None
async for generated_message in agent.generate(
llm=llm_manager.get_llm(),
model=defines.model,
session_id=MOCK_UUID,
prompt=requirement,
):
if generated_message.status == ApiStatusType.ERROR:
logger.error(f"❌ AI generation error: {generated_message.content}")
yield f"data: {json.dumps({'status': 'error'})}\n\n"
return
# If the message is not done, convert it to a ChatMessageBase to remove
# metadata and other unnecessary fields for streaming
if generated_message.status != ApiStatusType.DONE:
if not isinstance(generated_message, ChatMessageStreaming) and not isinstance(generated_message, ChatMessageStatus):
raise TypeError(
f"Expected ChatMessageStreaming or ChatMessageStatus, got {type(generated_message)}"
)
yield generated_message# Convert to ChatMessageBase for streaming
# Store reference to the complete AI message
if generated_message.status == ApiStatusType.DONE:
final_message = generated_message
break
if final_message is None:
error_message = ChatMessageError(
sessionId=MOCK_UUID, # No session ID for document uploads
content=f"No skill match found for the given requirement"
)
yield error_message
return
if not isinstance(final_message, ChatMessageSkillAssessment):
error_message = ChatMessageError(
sessionId=MOCK_UUID, # No session ID for document uploads
content=f"Skill match response is not valid"
)
yield error_message
return
skill_match : ChatMessageSkillAssessment = final_message
assessment = skill_match.skill_assessment
if not assessment:
error_message = ChatMessageError(
sessionId=MOCK_UUID, # No session ID for document uploads
content=f"Skill assessment could not be generated"
)
yield error_message
return
await database.cache_skill_match(cache_key, assessment)
logger.info(f"💾 Cached new skill match for candidate {candidate.id}")
logger.info(f"✅ Skill match found for candidate {candidate.id}: {assessment.evidence_strength}")
yield skill_match
return
try:
async def to_json(method):
try:
async for message in method:
json_data = message.model_dump(mode='json', by_alias=True)
json_str = json.dumps(json_data)
yield f"data: {json_str}\n\n".encode("utf-8")
except Exception as e:
logger.error(backstory_traceback.format_exc())
logger.error(f"Error in to_json conversion: {e}")
return
return StreamingResponse(
to_json(message_stream_generator()),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache, no-store, must-revalidate",
"Connection": "keep-alive",
"X-Accel-Buffering": "no", # Nginx
"X-Content-Type-Options": "nosniff",
"Access-Control-Allow-Origin": "*", # Adjust for your CORS needs
"Transfer-Encoding": "chunked",
},
)
except Exception as e:
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Get candidate skill match error: {e}")
return JSONResponse(
status_code=500,
content=create_error_response("SKILL_MATCH_ERROR", str(e))
)
logger.error(f"❌ Document upload error: {e}")
return StreamingResponse(
iter([json.dumps(ChatMessageError(
sessionId=MOCK_UUID, # No session ID for document uploads
content="Failed to generate skill assessment"
).model_dump(mode='json', by_alias=True))]),
media_type="text/event-stream"
)
@rate_limited(guest_per_minute=5, user_per_minute=30, admin_per_minute=100)
@api_router.get("/candidates/{username}/chat-sessions")

View File

@ -88,28 +88,38 @@ class Requirements(BaseModel):
raise ValueError("Requirements must be a dictionary with 'required' and 'preferred' keys.")
return values
class Citation(BaseModel):
text: str
source: str
context: str
relevance: int # 0-100 scale
class SkillStatus(str, Enum):
    """Lifecycle state of a per-requirement skill-match entry."""
    PENDING = "pending"
    COMPLETE = "complete"
    WAITING = "waiting"
    ERROR = "error"
class SkillMatch(BaseModel):
requirement: str
domain: str
status: SkillStatus
match_score: int = Field(..., alias='matchScore')
assessment: str
citations: List[Citation] = Field(default_factory=list)
description: str
class SkillStrength(str, Enum):
    """How strongly the gathered evidence supports a candidate's skill."""
    STRONG = "strong"
    MODERATE = "moderate"
    WEAK = "weak"
    NONE = "none"
class EvidenceDetail(BaseModel):
source: str = Field(..., alias="source", description="The source of the evidence (e.g., resume section, position, project)")
quote: str = Field(..., alias="quote", description="Exact text from the resume or other source showing evidence")
context: str = Field(..., alias="context", description="Brief explanation of how this demonstrates the skill")
model_config = {
"populate_by_name": True # Allow both field names and aliases
"populate_by_name": True, # Allow both field names and aliases
}
class SkillAssessment(BaseModel):
    """Structured result of assessing one candidate skill against evidence.

    Serialized with camelCase aliases for the frontend and cached in Redis
    (see RedisDatabase.cache_skill_match); created_at/updated_at default to
    the construction time in UTC.
    """
    candidate_id: str = Field(..., alias='candidateId')
    skill: str = Field(..., alias="skill", description="The skill being assessed")
    evidence_found: bool = Field(..., alias="evidenceFound", description="Whether evidence was found for the skill")
    evidence_strength: SkillStrength = Field(..., alias="evidenceStrength", description="Strength of evidence found for the skill")
    assessment: str = Field(..., alias="assessment", description="Short (one to two sentence) assessment of the candidate's proficiency with the skill")
    description: str = Field(..., alias="description", description="Short (two to three sentence) description of what the skill is, independent of whether the candidate has that skill or not")
    evidence_details: List[EvidenceDetail] = Field(default_factory=list, alias="evidenceDetails", description="List of evidence details supporting the skill assessment")
    created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias='createdAt')
    updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias='updatedAt')
    model_config = {
        "populate_by_name": True,  # Allow both field names and aliases
    }
class ApiMessageType(str, Enum):
@ -424,11 +434,6 @@ class ApplicationDecision(BaseModel):
date: datetime
by: str
class SkillAssessment(BaseModel):
skill_name: str = Field(..., alias="skillName")
score: Annotated[float, Field(ge=0, le=10)]
comments: Optional[str] = None
class NotificationPreference(BaseModel):
type: NotificationType
events: List[str]
@ -997,6 +1002,11 @@ class ChatMessage(ChatMessageUser):
#is_edited: bool = Field(False, alias="isEdited")
#edit_history: Optional[List[EditHistory]] = Field(None, alias="editHistory")
class ChatMessageSkillAssessment(ChatMessageUser):
    """Assistant chat message that carries a completed SkillAssessment payload."""
    role: ChatSenderType = ChatSenderType.ASSISTANT
    metadata: ChatMessageMetaData = Field(default=ChatMessageMetaData())
    # Structured assessment attached to the message; exposed to the
    # frontend under the camelCase alias "skillAssessment".
    skill_assessment: SkillAssessment = Field(..., alias="skillAssessment")
class GPUInfo(BaseModel):
name: str
memory: int