Resume generation almost working

James Ketr 2025-06-09 16:14:32 -07:00
parent dd0ab5eda6
commit a197535bea
14 changed files with 595 additions and 61 deletions

View File

@@ -1,12 +1,21 @@
import React, { useState, useCallback, useRef } from 'react';
import React, { useState, useCallback, useRef, useEffect } from 'react';
import {
Tabs,
Tab,
Box,
Button,
Paper,
Typography,
} from '@mui/material';
import { Job, Candidate, SkillAssessment } from "types/types";
import JsonView from '@uiw/react-json-view';
import { Scrollable } from './Scrollable';
import { useAuth } from 'hooks/AuthContext';
import * as Types from 'types/types';
import { StyledMarkdown } from './StyledMarkdown';
import { Message } from './Message';
import InputIcon from '@mui/icons-material/Input';
import TuneIcon from '@mui/icons-material/Tune';
import ArticleIcon from '@mui/icons-material/Article';
interface ResumeGeneratorProps {
job: Job;
@@ -15,31 +24,75 @@ interface ResumeGeneratorProps {
onComplete?: (resume: string) => void;
}
const defaultMessage: Types.ChatMessageStatus = {
status: "done", type: "text", sessionId: "", timestamp: new Date(), content: "", activity: 'info'
};
const ResumeGenerator: React.FC<ResumeGeneratorProps> = (props: ResumeGeneratorProps) => {
const { job, candidate, skills, onComplete } = props;
const [resume, setResume] = useState<string>('Generated resume goes here...');
const { apiClient } = useAuth();
const [resume, setResume] = useState<string>('');
const [prompt, setPrompt] = useState<string>('');
const [systemPrompt, setSystemPrompt] = useState<string>('');
const [generating, setGenerating] = useState<boolean>(false);
const [statusMessage, setStatusMessage] = useState<Types.ChatMessageStatus | null>(null);
const [tabValue, setTabValue] = useState<string>('resume');
const handleTabChange = (event: React.SyntheticEvent, newValue: string) => {
setTabValue(newValue);
}
// State for editing job description
const generateResume = () => {
setResume('Generation begins...');
setGenerating(true);
setTimeout(() => {
setGenerating(false);
setResume('Generation complete');
onComplete && onComplete(resume);
}, 3000);
const generateResumeHandlers = {
onStatus: (status: Types.ChatMessageStatus) => {
setStatusMessage({...defaultMessage, content: status.content.toLowerCase()});
},
onStreaming: (chunk: Types.ChatMessageStreaming) => {
setResume(chunk.content);
},
onComplete: () => {
setStatusMessage(null);
}
};
useEffect(() => {
if (!job || !candidate || !skills || resume || generating) {
return;
}
const generateResume = async () => {
setGenerating(true);
const request: any = apiClient.generateResume(candidate.id || '', skills, generateResumeHandlers);
const result = await request.promise;
setSystemPrompt(result.systemPrompt);
setPrompt(result.prompt);
setResume(result.resume);
setGenerating(false);
};
generateResume();
}, [job, candidate, apiClient, resume, skills, generating]);
return (
<Box
className="ResumeGenerator"
sx={{display: "flex", flexDirection: "row", width: "100%"}}>
<JsonView value={skills}/>
<Box sx={{display: "flex", flexDirection: "column"}}>
<Box>{resume}</Box>
<Button disabled={generating} onClick={generateResume} variant="contained">Generate Resume</Button>
</Box>
sx={{
display: "flex",
flexDirection: "column",
}}>
<Box sx={{ borderBottom: 1, borderColor: 'divider', mb: 3 }}>
<Tabs value={tabValue} onChange={handleTabChange} centered>
<Tab value="system" icon={<TuneIcon />} label="System" />
<Tab value="prompt" icon={<InputIcon />} label="Prompt" />
<Tab value="resume" icon={<ArticleIcon />} label="Resume" />
</Tabs>
</Box>
{ statusMessage && <Message message={statusMessage} />}
<Paper elevation={3} sx={{ p: 3, m: 4, mt: 0 }}><Scrollable autoscroll sx={{display: "flex", flexGrow: 1}}>
{ tabValue === 'system' && <pre>{systemPrompt}</pre> }
{ tabValue === 'prompt' && <pre>{prompt}</pre> }
{ tabValue === 'resume' && <StyledMarkdown content={resume} />}
</Scrollable></Paper>
</Box>
)

View File

@@ -257,7 +257,7 @@ const Header: React.FC<HeaderProps> = (props: HeaderProps) => {
// Render desktop navigation with dropdowns
const renderDesktopNavigation = () => {
return (
<Box sx={{ display: 'flex', width: "100%", alignItems: 'center', justifyContent: "space-between"}}>
<Box sx={{ display: 'flex', width: "100%", alignItems: 'center', justifyContent: "space-between", "& > :last-of-type": { marginRight: "auto"} }}>
{navigationItems.map((item, index) => {
const hasChildren = item.children && item.children.length > 0;
const isActive = isCurrentPath(item) || hasActiveChild(item);

View File

@@ -8,7 +8,7 @@ import {
import { useNavigate } from 'react-router-dom';
interface LoginRequiredProps {
asset: string;
asset: string;
}
const LoginRequired = (props: LoginRequiredProps) => {
const { asset } = props;

View File

@@ -228,13 +228,6 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
</Box>
);
// If no user is logged in, show message
if (!user?.id) {
return (
<LoginRequired asset="candidate analysis" />
);
}
return (
<Box sx={{
display: "flex", flexDirection: "column",

View File

@@ -909,6 +909,14 @@ class ApiClient {
return this.streamify<Types.DocumentMessage>(`/jobs/requirements/${jobId}`, null, streamingOptions);
}
generateResume(candidateId: string, skills: Types.SkillAssessment[], streamingOptions?: StreamingOptions<Types.ChatMessageResume>): StreamingResponse<Types.ChatMessageResume> {
const body = JSON.stringify(skills);
streamingOptions = {
...streamingOptions,
headers: this.defaultHeaders,
};
return this.streamify<Types.ChatMessageResume>(`/candidates/${candidateId}/generate-resume`, body, streamingOptions);
}
candidateMatchForRequirement(candidate_id: string, requirement: string,
streamingOptions?: StreamingOptions<Types.ChatMessageSkillAssessment>)
: StreamingResponse<Types.ChatMessageSkillAssessment> {

View File

@@ -1,6 +1,6 @@
// Generated TypeScript types from Pydantic models
// Source: src/backend/models.py
// Generated on: 2025-06-09T17:45:24.922154
// Generated on: 2025-06-09T20:36:06.432367
// DO NOT EDIT MANUALLY - This file is auto-generated
// ============================
@@ -19,7 +19,7 @@ export type ApiStatusType = "streaming" | "status" | "done" | "error";
export type ApplicationStatus = "applied" | "reviewing" | "interview" | "offer" | "rejected" | "accepted" | "withdrawn";
export type ChatContextType = "job_search" | "job_requirements" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "generate_image" | "rag_search" | "skill_match";
export type ChatContextType = "job_search" | "job_requirements" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "generate_resume" | "generate_image" | "rag_search" | "skill_match";
export type ChatSenderType = "user" | "assistant" | "system" | "information" | "warning" | "error";
@@ -282,7 +282,7 @@ export interface Certification {
}
export interface ChatContext {
type: "job_search" | "job_requirements" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "generate_image" | "rag_search" | "skill_match";
type: "job_search" | "job_requirements" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "generate_resume" | "generate_image" | "rag_search" | "skill_match";
relatedEntityId?: string;
relatedEntityType?: "job" | "candidate" | "employer";
additionalContext?: Record<string, any>;
@@ -341,6 +341,20 @@ export interface ChatMessageRagSearch {
content: Array<ChromaDBGetResponse>;
}
export interface ChatMessageResume {
id?: string;
sessionId: string;
senderId?: string;
status: "streaming" | "status" | "done" | "error";
type: "binary" | "text" | "json";
timestamp?: Date;
role: "user" | "assistant" | "system" | "information" | "warning" | "error";
content: string;
tunables?: Tunables;
metadata: ChatMessageMetaData;
resume: string;
}
export interface ChatMessageSkillAssessment {
id?: string;
sessionId: string;
@@ -1267,6 +1281,19 @@ export function convertChatMessageRagSearchFromApi(data: any): ChatMessageRagSea
timestamp: data.timestamp ? new Date(data.timestamp) : undefined,
};
}
/**
* Convert ChatMessageResume from API response, parsing date fields
* Date fields: timestamp
*/
export function convertChatMessageResumeFromApi(data: any): ChatMessageResume {
if (!data) return data;
return {
...data,
// Convert timestamp from ISO string to Date
timestamp: data.timestamp ? new Date(data.timestamp) : undefined,
};
}
/**
* Convert ChatMessageSkillAssessment from API response, parsing date fields
* Date fields: timestamp
@@ -1678,6 +1705,8 @@ export function convertFromApi<T>(data: any, modelType: string): T {
return convertChatMessageErrorFromApi(data) as T;
case 'ChatMessageRagSearch':
return convertChatMessageRagSearchFromApi(data) as T;
case 'ChatMessageResume':
return convertChatMessageResumeFromApi(data) as T;
case 'ChatMessageSkillAssessment':
return convertChatMessageSkillAssessmentFromApi(data) as T;
case 'ChatMessageStatus':

View File

@@ -1,6 +1,6 @@
from __future__ import annotations
from datetime import UTC, datetime
from pydantic import model_validator, Field, BaseModel
from pydantic import model_validator, Field, BaseModel # type: ignore
from typing import (
Dict,
Literal,
@@ -23,7 +23,7 @@ import asyncio
import time
import os
import random
from names_dataset import NameDataset, NameWrapper
from names_dataset import NameDataset, NameWrapper # type: ignore
from .base import Agent, agent_registry, LLMMessage
from models import ApiActivityType, Candidate, ChatMessage, ChatMessageError, ChatMessageMetaData, ApiMessageType, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, ChatOptions, ChatSenderType, ApiStatusType, Tunables
@@ -128,7 +128,7 @@ logger = logging.getLogger(__name__)
class EthnicNameGenerator:
def __init__(self):
try:
from names_dataset import NameDataset
from names_dataset import NameDataset # type: ignore
self.nd = NameDataset()
except ImportError:
logger.error("NameDataset not available. Please install: pip install names-dataset")
@@ -292,7 +292,7 @@
return names
class GeneratePersona(Agent):
agent_type: Literal["generate_persona"] = "generate_persona"
agent_type: Literal["generate_persona"] = "generate_persona" # type: ignore
_agent_type: ClassVar[str] = agent_type # Add this for registration
agent_persist: bool = False

View File

@@ -0,0 +1,198 @@
from __future__ import annotations
from pydantic import model_validator, Field # type: ignore
from typing import (
Dict,
Literal,
ClassVar,
Any,
AsyncGenerator,
List,
Optional
# override
) # NOTE: You must import Optional for late binding to work
import inspect
import re
import json
import traceback
import asyncio
import time
import asyncio
import numpy as np # type: ignore
from logger import logger
from .base import Agent, agent_registry
from models import (ApiActivityType, ApiStatusType, Candidate, ChatMessage, ChatMessageError, ChatMessageResume, ChatMessageStatus, JobRequirements, JobRequirementsMessage, SkillAssessment, SkillStrength, Tunables)
class GenerateResume(Agent):
agent_type: Literal["generate_resume"] = "generate_resume" # type: ignore
_agent_type: ClassVar[str] = agent_type # Add this for registration
def generate_resume_prompt(
self,
skills: List[SkillAssessment]
):
"""
Generate a professional resume based on skill assessment results
Parameters:
- candidate_name (str): The candidate's full name
- candidate_contact_info (dict): Contact details like email, phone, location
- skill_assessment_results (list): List of individual skill assessment results from LLM queries
- original_resume (str): Original resume text for reference
Returns:
- str: System prompt for generating a professional resume
"""
if not self.user:
raise ValueError("User must be bound to agent")
# Extract and organize skill assessment data
skills_by_strength = {
SkillStrength.STRONG: [],
SkillStrength.MODERATE: [],
SkillStrength.WEAK: [],
SkillStrength.NONE: []
}
experience_evidence = {}
# Process each skill assessment
for assessment in skills:
skill = assessment.skill
strength = assessment.evidence_strength
# Add to appropriate strength category
if skill and strength in skills_by_strength:
skills_by_strength[strength].append(skill)
# Collect experience evidence
for evidence in assessment.evidence_details:
source = evidence.source
if source:
if source not in experience_evidence:
experience_evidence[source] = []
experience_evidence[source].append(
{
"skill": skill,
"quote": evidence.quote,
"context": evidence.context
}
)
# Build the system prompt
system_prompt = f"""You are a professional resume writer with expertise in highlighting candidate strengths and experiences.
Create a polished, concise, and ATS-friendly resume for the candidate based on the assessment data provided.
## CANDIDATE INFORMATION:
Name: {self.user.full_name}
Email: {self.user.email or 'N/A'}
Phone: {self.user.phone or 'N/A'}
{f'Location: {json.dumps(self.user.location.model_dump())}' if self.user.location else ''}
## SKILL ASSESSMENT RESULTS:
"""
if len(skills_by_strength[SkillStrength.STRONG]):
system_prompt += f"""\
### Strong Skills (prominent in resume):
{", ".join(skills_by_strength[SkillStrength.STRONG])}
"""
if len(skills_by_strength[SkillStrength.MODERATE]):
system_prompt += f"""\
### Moderate Skills (demonstrated in resume):
{", ".join(skills_by_strength[SkillStrength.MODERATE])}
"""
if len(skills_by_strength[SkillStrength.WEAK]):
system_prompt += f"""\
### Weaker Skills (mentioned or implied):
{", ".join(skills_by_strength[SkillStrength.WEAK])}
"""
system_prompt += """\
## EXPERIENCE EVIDENCE:
"""
# Add experience evidence by source/position
for source, evidences in experience_evidence.items():
system_prompt += f"\n### {source}:\n"
for evidence in evidences:
system_prompt += f"- {evidence['skill']}: {evidence['context']}\n"
# Add instructions for the resume creation
system_prompt += """\
## INSTRUCTIONS:
1. Create a professional resume that emphasizes the candidate's strongest skills and most relevant experiences.
2. Format the resume in a clean, concise, and modern style that will pass ATS systems.
3. Include these sections:
- Professional Summary (highlight strongest skills and experience level)
- Skills (organized by strength)
- Professional Experience (focus on achievements and evidence of the skill)
- If present in material, provide an Education section
- If present in material, provide a Certifications section
- Additional sections as appropriate
4. Use action verbs and quantifiable achievements where possible.
5. Maintain a professional tone throughout.
6. Be concise and impactful - the resume should be 1-2 pages MAXIMUM.
7. Ensure all information is accurate to the original resume - do not embellish or fabricate experiences.
## OUTPUT FORMAT:
Provide the resume in clean markdown format, ready for the candidate to use.
"""
prompt = "Create a tailored professional resume that highlights candidate's skills and experience most relevant to the job requirements. Format it in clean, ATS-friendly markdown. Provide ONLY the resume with no commentary before or after."
return system_prompt, prompt
async def generate_resume(
self, llm: Any, model: str, session_id: str, skills: List[SkillAssessment]
) -> AsyncGenerator[ChatMessage | ChatMessageError, None]:
# Stage 1: Build the prompts and generate the resume
status_message = ChatMessageStatus(
session_id=session_id,
content="Generating resume from skill assessment results",
activity=ApiActivityType.THINKING
)
yield status_message
system_prompt, prompt = self.generate_resume_prompt(skills=skills)
generated_message = None
async for generated_message in self.llm_one_shot(llm=llm, model=model, session_id=session_id, prompt=prompt, system_prompt=system_prompt):
if generated_message.status == ApiStatusType.ERROR:
yield generated_message
return
if generated_message.status != ApiStatusType.DONE:
yield generated_message
if not generated_message:
error_message = ChatMessageError(
session_id=session_id,
content="Job requirements analysis failed to generate a response."
)
logger.error(f"⚠️ {error_message.content}")
yield error_message
return
resume_message = ChatMessageResume(
session_id=session_id,
status=ApiStatusType.DONE,
content="Resume generation completed successfully.",
metadata=generated_message.metadata,
resume=generated_message.content,
prompt=prompt,
system_prompt=system_prompt,
)
yield resume_message
logger.info(f"✅ Resume generation completed successfully.")
return
# Register the base agent
agent_registry.register(GenerateResume._agent_type, GenerateResume)
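For a quick sense of the streaming contract this agent exposes, here is a minimal consumption sketch. run_generate_resume is a hypothetical helper; llm, model, and skills are assumed to be supplied by the caller, mirroring how the /generate-resume route below drives the generator:
async def run_generate_resume(agent: GenerateResume, llm: Any, model: str, skills: List[SkillAssessment]) -> Optional[str]:
    final = None
    # Intermediate messages are status updates or streamed chunks; the DONE message is a ChatMessageResume
    async for message in agent.generate_resume(llm=llm, model=model, session_id="session", skills=skills):
        if message.status == ApiStatusType.ERROR:
            raise RuntimeError(message.content)
        if message.status == ApiStatusType.DONE:
            final = message
    return final.resume if final is not None else None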

View File

@@ -1,5 +1,5 @@
from __future__ import annotations
from pydantic import model_validator, Field
from pydantic import model_validator, Field # type: ignore
from typing import (
Dict,
Literal,
@@ -16,7 +16,7 @@ import json
import asyncio
import time
import asyncio
import numpy as np
import numpy as np # type: ignore
from .base import Agent, agent_registry, LLMMessage
from models import ApiActivityType, Candidate, ChatMessage, ChatMessageError, ChatMessageMetaData, ApiMessageType, ChatMessageStatus, ChatMessageUser, ChatOptions, ChatSenderType, ApiStatusType, JobRequirements, JobRequirementsMessage, Tunables
@@ -26,7 +26,7 @@ import defines
import backstory_traceback as traceback
class JobRequirementsAgent(Agent):
agent_type: Literal["job_requirements"] = "job_requirements"
agent_type: Literal["job_requirements"] = "job_requirements" # type: ignore
_agent_type: ClassVar[str] = agent_type # Add this for registration
# Stage 1A: Job Analysis Implementation

View File

@@ -170,8 +170,8 @@ JSON RESPONSE:"""
yield error_message
return
logger.info(f"🔍 RAG content retrieved: {len(rag_message.content)} bytes")
rag_context = self.get_rag_context(rag_message)
logger.info(f"🔍 RAG content retrieved {len(rag_context)} bytes of context")
system_prompt, prompt = self.generate_skill_assessment_prompt(skill=prompt, rag_context=rag_context)
skill_message = None

View File

@@ -272,9 +272,8 @@ class RedisDatabase:
"""Cache skill match assessment"""
try:
# Cache for 1 hour by default
await self.redis.setex(
await self.redis.set(
cache_key,
3600,
json.dumps(assessment.model_dump(mode='json', by_alias=True), default=str) # Serialize with datetime handling
)
logger.debug(f"💾 Skill match cached: {cache_key}")
@@ -349,7 +348,7 @@
try:
# This assumes all candidates belonging to this user need cache invalidation
# You might need to adjust the pattern based on how you associate candidates with users
pattern = f"skill_match:*"
pattern = f"skill_match:{user_id}:*"
keys = await self.redis.keys(pattern)
# Filter keys that belong to candidates owned by this user

View File

@@ -1,3 +1,4 @@
import hashlib
import time
from fastapi import FastAPI, HTTPException, Depends, Query, Path, Body, status, APIRouter, Request, BackgroundTasks, File, UploadFile, Form# type: ignore
from fastapi.middleware.cors import CORSMiddleware # type: ignore
@@ -75,7 +76,7 @@ import agents
# =============================
from models import (
# API
MOCK_UUID, ApiActivityType, ChatMessageError, ChatMessageSkillAssessment, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, DocumentMessage, DocumentOptions, Job, JobRequirementsMessage, LoginRequest, CreateCandidateRequest, CreateEmployerRequest,
MOCK_UUID, ApiActivityType, ChatMessageError, ChatMessageResume, ChatMessageSkillAssessment, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, DocumentMessage, DocumentOptions, Job, JobRequirements, JobRequirementsMessage, LoginRequest, CreateCandidateRequest, CreateEmployerRequest,
# User models
Candidate, Employer, BaseUserWithType, BaseUser, Guest, Authentication, AuthResponse, CandidateAI,
@@ -4518,17 +4519,18 @@ async def get_candidate_skill_match(
candidate = Candidate.model_validate(candidate_data)
# Create cache key for this specific candidate + requirement combination
cache_key = f"skill_match:{candidate_id}:{hash(requirement)}"
requirement_hash = hashlib.md5(requirement.encode()).hexdigest()[:8]
cache_key = f"skill_match:{candidate.id}:{requirement_hash}"
# Get cached assessment if it exists
assessment : SkillAssessment | None = await database.get_cached_skill_match(cache_key)
# Get the latest RAG data update time for the current user
user_rag_update_time = await database.get_user_rag_update_time(current_user.id)
# Determine if we need to regenerate the assessment
cached_date = None
if assessment:
# Get the latest RAG data update time for the current user
user_rag_update_time = await database.get_user_rag_update_time(current_user.id)
updated = assessment.updated_at if "updated_at" in assessment else assessment.created_at
# Check if cached result is still valid
# Regenerate if user's RAG data was updated after cache date
@@ -4537,7 +4539,9 @@
assessment = None
else:
cached_date = updated
else:
logger.info(f"💾 No cached skill match data: {cache_key}, {candidate.id}, {requirement}")
if assessment:
logger.info(f"✅ Found cached skill match for candidate {candidate.username} against requirement: {requirement}")
logger.info(f"💾 Cached skill match data: {assessment.evidence_strength}")
@@ -4656,7 +4660,250 @@ async def get_candidate_skill_match(
).model_dump(mode='json', by_alias=True))]),
media_type="text/event-stream"
)
@api_router.post("/candidates/job-score")
async def get_candidate_job_score(
job_requirements: JobRequirements = Body(...),
skills: List[SkillAssessment] = Body(...),
current_user = Depends(get_current_user_or_guest),
database: RedisDatabase = Depends(get_database)
):
# Initialize counters
required_skills_total = 0
required_skills_matched = 0
preferred_skills_total = 0
preferred_skills_matched = 0
# Count required technical skills
tech_required = job_requirements.technical_skills.required
required_skills_total += len(tech_required)
# Count preferred technical skills
tech_preferred = job_requirements.technical_skills.preferred
preferred_skills_total += len(tech_preferred)
# Count required experience
exp_required = job_requirements.experience_requirements.required
required_skills_total += len(exp_required)
# Count preferred experience
exp_preferred = job_requirements.experience_requirements.preferred
preferred_skills_total += len(exp_preferred)
# Education requirements count toward required
edu_required = job_requirements.education or []
required_skills_total += len(edu_required)
# Soft skills count toward preferred
soft_skills = job_requirements.soft_skills or []
preferred_skills_total += len(soft_skills)
# Industry knowledge counts toward preferred
certifications = job_requirements.certifications or []
preferred_skills_total += len(certifications)
preferred_attributes = job_requirements.preferred_attributes or []
preferred_skills_total += len(preferred_attributes)
# Check matches in assessment results
for assessment in skills:
evidence_found = assessment.evidence_found
evidence_strength = assessment.evidence_strength
# Consider STRONG and MODERATE evidence as matches
is_match = evidence_found and evidence_strength in ["STRONG", "MODERATE"]
if not is_match:
continue
# Loop through each of the job requirements categories
# and see if the skill matches the required or preferred skills
if assessment.skill in tech_required:
required_skills_matched += 1
elif assessment.skill in tech_preferred:
preferred_skills_matched += 1
elif assessment.skill in exp_required:
required_skills_matched += 1
elif assessment.skill in exp_preferred:
preferred_skills_matched += 1
elif assessment.skill in edu_required:
required_skills_matched += 1
elif assessment.skill in soft_skills:
preferred_skills_matched += 1
elif assessment.skill in certifications:
preferred_skills_matched += 1
elif assessment.skill in preferred_attributes:
preferred_skills_matched += 1
# If the job lists no requirements at all, return empty statistics
if required_skills_total == 0 and preferred_skills_total == 0:
return create_success_response({
"required_skills": {
"total": 0,
"matched": 0,
"percentage": 0.0,
},
"preferred_skills": {
"total": 0,
"matched": 0,
"percentage": 0.0,
},
"overall_match": {
"total": 0,
"matched": 0,
"percentage": 0.0,
},
})
# Calculate percentages
required_match_percent = (
(required_skills_matched / required_skills_total * 100)
if required_skills_total > 0
else 0
)
preferred_match_percent = (
(preferred_skills_matched / preferred_skills_total * 100)
if preferred_skills_total > 0
else 0
)
overall_total = required_skills_total + preferred_skills_total
overall_matched = required_skills_matched + preferred_skills_matched
overall_match_percent = (
(overall_matched / overall_total * 100) if overall_total > 0 else 0
)
return create_success_response({
"required_skills": {
"total": required_skills_total,
"matched": required_skills_matched,
"percentage": round(required_match_percent, 1),
},
"preferred_skills": {
"total": preferred_skills_total,
"matched": preferred_skills_matched,
"percentage": round(preferred_match_percent, 1),
},
"overall_match": {
"total": overall_total,
"matched": overall_matched,
"percentage": round(overall_match_percent, 1),
},
})
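As a worked example of this arithmetic: a job with 6 required items (technical skills, required experience, education) and 4 preferred items, where the assessments match 4 required and 2 preferred, yields required 4/6 = 66.7%, preferred 2/4 = 50.0%, and overall 6/10 = 60.0%.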
@api_router.post("/candidates/{candidate_id}/generate-resume")
async def generate_resume(
candidate_id: str = Path(...),
skills: List[SkillAssessment] = Body(...),
current_user = Depends(get_current_user_or_guest),
database: RedisDatabase = Depends(get_database)
) -> StreamingResponse:
"""Get skill match for a candidate against a requirement with caching"""
async def message_stream_generator():
candidate_data = await database.get_candidate(candidate_id)
if not candidate_data:
error_message = ChatMessageError(
sessionId=MOCK_UUID, # No session ID for resume generation
content=f"Candidate with ID '{candidate_id}' not found"
)
yield error_message
return
candidate = Candidate.model_validate(candidate_data)
logger.info(f"🔍 Generating resume for candidate {candidate.username}")
async with entities.get_candidate_entity(candidate=candidate) as candidate_entity:
agent = candidate_entity.get_or_create_agent(agent_type=ChatContextType.GENERATE_RESUME)
if not agent:
error_message = ChatMessageError(
sessionId=MOCK_UUID, # No session ID for resume generation
content="No resume generation agent found for this candidate"
)
yield error_message
return
# Generate the resume
final_message = None
async for generated_message in agent.generate_resume(
llm=llm_manager.get_llm(),
model=defines.model,
session_id=MOCK_UUID,
skills=skills,
):
if generated_message.status == ApiStatusType.ERROR:
logger.error(f"❌ AI generation error: {generated_message.content}")
yield generated_message
return
# If the message is not done, it should be a streaming chunk or a status update;
# forward it to the client as it arrives
if generated_message.status != ApiStatusType.DONE:
if not isinstance(generated_message, ChatMessageStreaming) and not isinstance(generated_message, ChatMessageStatus):
raise TypeError(
f"Expected ChatMessageStreaming or ChatMessageStatus, got {type(generated_message)}"
)
yield generated_message
# Store reference to the complete AI message
if generated_message.status == ApiStatusType.DONE:
final_message = generated_message
break
if final_message is None:
error_message = ChatMessageError(
sessionId=MOCK_UUID, # No session ID for resume generation
content="Resume generation did not produce a result"
)
yield error_message
return
if not isinstance(final_message, ChatMessageResume):
error_message = ChatMessageError(
sessionId=MOCK_UUID, # No session ID for resume generation
content="Resume generation response is not valid"
)
yield error_message
return
resume: ChatMessageResume = final_message
yield resume
return
try:
async def to_json(method):
try:
async for message in method:
json_data = message.model_dump(mode='json', by_alias=True)
json_str = json.dumps(json_data)
yield f"data: {json_str}\n\n".encode("utf-8")
except Exception as e:
logger.error(backstory_traceback.format_exc())
logger.error(f"Error in to_json conversion: {e}")
return
return StreamingResponse(
to_json(message_stream_generator()),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache, no-store, must-revalidate",
"Connection": "keep-alive",
"X-Accel-Buffering": "no", # Nginx
"X-Content-Type-Options": "nosniff",
"Access-Control-Allow-Origin": "*", # Adjust for your CORS needs
"Transfer-Encoding": "chunked",
},
)
except Exception as e:
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Document upload error: {e}")
return StreamingResponse(
iter([json.dumps(ChatMessageError(
sessionId=MOCK_UUID, # No session ID for resume generation
content="Failed to generate resume"
).model_dump(mode='json', by_alias=True))]),
media_type="text/event-stream"
)
@rate_limited(guest_per_minute=5, user_per_minute=30, admin_per_minute=100)
@api_router.get("/candidates/{username}/chat-sessions")
async def get_candidate_chat_sessions(

View File

@@ -78,16 +78,6 @@ class ChatSenderType(str, Enum):
WARNING = "warning"
ERROR = "error"
class Requirements(BaseModel):
required: List[str] = Field(default_factory=list)
preferred: List[str] = Field(default_factory=list)
@model_validator(mode='before')
def validate_requirements(cls, values):
if not isinstance(values, dict):
raise ValueError("Requirements must be a dictionary with 'required' and 'preferred' keys.")
return values
class SkillStatus(str, Enum):
PENDING = "pending"
COMPLETE = "complete"
@@ -142,6 +132,7 @@ class ChatContextType(str, Enum):
GENERAL = "general"
GENERATE_PERSONA = "generate_persona"
GENERATE_PROFILE = "generate_profile"
GENERATE_RESUME = "generate_resume"
GENERATE_IMAGE = "generate_image"
RAG_SEARCH = "rag_search"
SKILL_MATCH = "skill_match"
@@ -667,6 +658,16 @@ class GuestCleanupRequest(BaseModel):
"populate_by_name": True
}
class Requirements(BaseModel):
required: List[str] = Field(default_factory=list)
preferred: List[str] = Field(default_factory=list)
@model_validator(mode='before')
def validate_requirements(cls, values):
if not isinstance(values, dict):
raise ValueError("Requirements must be a dictionary with 'required' and 'preferred' keys.")
return values
class JobRequirements(BaseModel):
technical_skills: Requirements = Field(..., alias="technicalSkills")
experience_requirements: Requirements = Field(..., alias="experienceRequirements")
@@ -1010,6 +1011,13 @@ class ChatMessageSkillAssessment(ChatMessageUser):
metadata: ChatMessageMetaData = Field(default=ChatMessageMetaData())
skill_assessment: SkillAssessment = Field(..., alias="skillAssessment")
class ChatMessageResume(ChatMessageUser):
role: ChatSenderType = ChatSenderType.ASSISTANT
metadata: ChatMessageMetaData = Field(default=ChatMessageMetaData())
resume: str = Field(..., alias="resume")
system_prompt: Optional[str] = Field(None, alias="systemPrompt")
prompt: Optional[str] = Field(None, alias="prompt")
class GPUInfo(BaseModel):
name: str
memory: int

View File

@@ -391,8 +391,7 @@ class ChromaDBFileWatcher(FileSystemEventHandler):
raise
# Log diagnostics
logging.info(f"Input text: {text}")
logging.info(f"Embedding shape: {embedding.shape}, First 5 values: {embedding[:5]}")
logging.debug(f"Embedding shape: {embedding.shape}, First 5 values: {embedding[:5]}")
# Check for invalid embeddings
if embedding.size == 0 or np.any(np.isnan(embedding)) or np.any(np.isinf(embedding)):
@@ -402,12 +401,12 @@ class ChromaDBFileWatcher(FileSystemEventHandler):
# Check normalization
norm = np.linalg.norm(embedding)
is_normalized = np.allclose(norm, 1.0, atol=1e-3)
logging.info(f"Embedding norm: {norm}, Is normalized: {is_normalized}")
logging.debug(f"Embedding norm: {norm}, Is normalized: {is_normalized}")
# Normalize if needed
if not is_normalized:
embedding = embedding / norm
logging.info("Embedding normalized manually.")
logging.debug("Embedding normalized manually.")
return embedding