From 2ac5f5f078ed562bdaf8ee3d8a6808cb8178753c Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 16 Jul 2025 17:03:38 -0700 Subject: [PATCH] Added interactive resume chat with agentic backend --- frontend/src/components/ui/ResumePreview.css | 155 ++++++ frontend/src/components/ui/ResumePreview.tsx | 532 +++++++++++++++++++ frontend/src/pages/CandidateChatPage.tsx | 54 +- frontend/src/types/types.ts | 20 +- src/backend/agents/base.py | 7 +- src/backend/agents/candidate_chat.py | 4 +- src/backend/agents/edit_resume.py | 31 +- src/backend/agents/generate_image.py | 3 +- src/backend/agents/generate_persona.py | 3 +- src/backend/agents/job_requirements.py | 3 +- src/backend/agents/rag_search.py | 4 +- src/backend/agents/resume_chat.py | 291 ++++++++++ src/backend/agents/skill_match.py | 3 +- src/backend/models.py | 11 +- src/backend/utils/helpers.py | 2 +- 15 files changed, 1062 insertions(+), 61 deletions(-) create mode 100644 frontend/src/components/ui/ResumePreview.css create mode 100644 frontend/src/components/ui/ResumePreview.tsx create mode 100644 src/backend/agents/resume_chat.py diff --git a/frontend/src/components/ui/ResumePreview.css b/frontend/src/components/ui/ResumePreview.css new file mode 100644 index 0000000..8084486 --- /dev/null +++ b/frontend/src/components/ui/ResumePreview.css @@ -0,0 +1,155 @@ + +/* A4 Portrait simulation for MuiMarkdown */ +.a4-document .MuiTypography-root { + font-family: 'Roboto', 'Times New Roman', serif; +} + +.a4-document.with-margins { + /* Page break lines - repeating dotted lines every A4 height */ + background-image: + repeating-linear-gradient( + #ddd, + #ddd 12mm, + transparent calc(12mm + 1px), + transparent calc(285mm - 1px), /* 297mm - 8mm top/bottom margins */ + #ddd calc(285mm), + #ddd 297mm + ); + background-size: 100% 297mm; + background-repeat: repeat-y; +} + +.a4-document { + /* display: flex; */ + /* position: relative; */ + /* A4 dimensions: 210mm x 297mm */ + width: 210mm; + min-height: 297mm; + + /* 
Alternative pixel-based approach (96 DPI) */ + /* width: 794px; */ + /* height: 1123px; */ + + /* Document styling */ + background: white; + padding: 12mm; /* 1/4" margins all around */ + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); + border: 1px solid #e0e0e0; + + /* Typography for document feel */ + font-family: 'Roboto', 'Times New Roman', serif; + font-size: 12pt; + line-height: 1.6; + color: #333; + + /* Ensure proper page breaks for printing */ + page-break-after: always; + + /* Prevent content overflow */ + box-sizing: border-box; + /* overflow: hidden; */ +} + +/* Container to center the document */ +.document-container { + display: flex; + position: relative; + justify-content: center; + min-height: fit-content; + height: fit-content; + background-color: #f5f5f5; /* Light gray background for contrast */ + padding: 20px 0; +} + +/* Responsive adjustments */ +@media screen and (max-width: 900px) { + .a4-document { + width: 95vw; + height: auto; + min-height: 134vw; /* Maintains A4 aspect ratio (297/210 ≈ 1.414) */ + margin: 10px auto; + padding: 6vw; + } +} + +/* Print styles */ +@media print { + .document-container { + background: none; + padding: 0mm !important; + margin: 0mm !important; + } + + .a4-document { + width: 210mm; + margin: 0; + padding: 0; + box-shadow: none; + border: none; + page-break-after: always; + } +} + +/* Additional MuiMarkdown specific adjustments */ +.a4-document h1, +.a4-document h2, +.a4-document h3, +.a4-document h4, +.a4-document h5, +.a4-document h6 { + font-size: 1em; + margin-top: 0.25em; + margin-bottom: 0.25em; +} + +/* Put after above so they take precedence */ +.a4-document h1, +.a4-document h2 { + font-size: 1.1em; +} + + +.a4-document p { + margin-bottom: 1em; + text-align: justify; +} + +.a4-document ul, +.a4-document ol { + margin-bottom: 1em; + padding-left: 2em; +} + +.a4-document blockquote { + margin: 1em 0; + padding-left: 1em; + border-left: 3px solid #ccc; + font-style: italic; +} + +.a4-document code { + 
background-color: #f5f5f5; + padding: 0.2em 0.4em; + border-radius: 3px; + font-family: 'Courier New', monospace; +} + +.a4-document pre { + background-color: #f5f5f5; + padding: 1em; + border-radius: 5px; + overflow-x: auto; + margin: 1em 0; +} + +.BackstoryResumeHeader { + gap: 1rem; + display: flex; + /* flex-direction: column; */ +} + +.BackstoryResumeHeader p { + /* border: 3px solid purple; */ + margin: 0 !important; +} + diff --git a/frontend/src/components/ui/ResumePreview.tsx b/frontend/src/components/ui/ResumePreview.tsx new file mode 100644 index 0000000..00bda6f --- /dev/null +++ b/frontend/src/components/ui/ResumePreview.tsx @@ -0,0 +1,532 @@ +import React from 'react'; +import { Box, Typography, SxProps, Theme } from '@mui/material'; +import { + Email as EmailIcon, + Phone as PhoneIcon, + LocationOn as LocationIcon, +} from '@mui/icons-material'; +import { parsePhoneNumberFromString } from 'libphonenumber-js'; +import { StyledMarkdown } from 'components/StyledMarkdown'; +import * as Types from 'types/types'; + +import './ResumePreview.css'; + +// Resume Style Definitions +export interface ResumeStyle { + name: string; + description: string; + headerStyle: SxProps; + footerStyle: SxProps; + contentStyle: SxProps; + markdownStyle: SxProps; + color: { + primary: string; + secondary: string; + accent: string; + text: string; + background: string; + }; +} + +const generateResumeStyles = (): Record => { + return { + classic: { + name: 'Classic', + description: 'Traditional, professional serif design', + headerStyle: { + display: 'flex', + flexDirection: 'row', + fontFamily: '"Times New Roman", Times, serif', + borderBottom: '2px solid #2c3e50', + paddingBottom: 2, + marginBottom: 3, + } as SxProps, + footerStyle: { + fontFamily: '"Times New Roman", Times, serif', + borderTop: '2px solid #2c3e50', + paddingTop: 2, + display: 'flex', + flexDirection: 'column', + alignItems: 'center', + justifyContent: 'center', + textTransform: 'uppercase', + alignContent: 
'center', + fontSize: '0.8rem', + pb: 2, + mb: 2, + } as SxProps, + contentStyle: { + fontFamily: '"Times New Roman", Times, serif', + lineHeight: 1.6, + color: '#2c3e50', + } as SxProps, + markdownStyle: { + fontFamily: '"Times New Roman", Times, serif', + '& h1, & h2, & h3': { + fontFamily: '"Times New Roman", Times, serif', + color: '#2c3e50', + borderBottom: '1px solid #bdc3c7', + paddingBottom: 1, + marginBottom: 2, + }, + '& p, & li': { + lineHeight: 1.6, + marginBottom: 1, + }, + '& ul': { + paddingLeft: 3, + }, + } as SxProps, + color: { + primary: '#2c3e50', + secondary: '#34495e', + accent: '#3498db', + text: '#2c3e50', + background: '#ffffff', + }, + }, + modern: { + name: 'Modern', + description: 'Clean, minimalist sans-serif layout', + headerStyle: { + display: 'flex', + flexDirection: 'row', + fontFamily: '"Helvetica Neue", Helvetica, Arial, sans-serif', + borderLeft: '4px solid #3498db', + paddingLeft: 2, + marginBottom: 3, + backgroundColor: '#f8f9fa', + padding: 2, + borderRadius: 1, + } as SxProps, + footerStyle: { + fontFamily: '"Helvetica Neue", Helvetica, Arial, sans-serif', + borderLeft: '4px solid #3498db', + backgroundColor: '#f8f9fa', + paddingTop: 2, + borderRadius: 1, + display: 'flex', + flexDirection: 'column', + alignItems: 'center', + justifyContent: 'center', + textTransform: 'uppercase', + alignContent: 'center', + fontSize: '0.8rem', + pb: 2, + mb: 2, + } as SxProps, + contentStyle: { + fontFamily: '"Helvetica Neue", Helvetica, Arial, sans-serif', + lineHeight: 1.5, + color: '#2c3e50', + } as SxProps, + markdownStyle: { + fontFamily: '"Helvetica Neue", Helvetica, Arial, sans-serif', + '& h1, & h2, & h3': { + fontFamily: '"Helvetica Neue", Helvetica, Arial, sans-serif', + color: '#3498db', + fontWeight: 300, + marginBottom: 1.5, + }, + '& h1': { + fontSize: '1.75rem', + }, + '& h2': { + fontSize: '1.5rem', + }, + '& h3': { + fontSize: '1.25rem', + }, + '& p, & li': { + lineHeight: 1.5, + marginBottom: 0.75, + }, + '& ul': { + 
paddingLeft: 2.5, + }, + } as SxProps, + color: { + primary: '#3498db', + secondary: '#2c3e50', + accent: '#e74c3c', + text: '#2c3e50', + background: '#ffffff', + }, + }, + creative: { + name: 'Creative', + description: 'Colorful, unique design with personality', + headerStyle: { + display: 'flex', + flexDirection: 'row', + fontFamily: '"Montserrat", "Helvetica Neue", Arial, sans-serif', + background: 'linear-gradient(135deg, #667eea 0%, #764ba2 100%)', + color: '#ffffff', + padding: 2.5, + borderRadius: 1.5, + marginBottom: 3, + } as SxProps, + footerStyle: { + fontFamily: '"Montserrat", "Helvetica Neue", Arial, sans-serif', + background: 'linear-gradient(135deg, #667eea 0%, #764ba2 100%)', + color: '#ffffff', + paddingTop: 2, + borderRadius: 1.5, + display: 'flex', + flexDirection: 'column', + alignItems: 'center', + justifyContent: 'center', + textTransform: 'uppercase', + alignContent: 'center', + fontSize: '0.8rem', + pb: 2, + mb: 2, + } as SxProps, + contentStyle: { + fontFamily: '"Open Sans", Arial, sans-serif', + lineHeight: 1.6, + color: '#444444', + } as SxProps, + markdownStyle: { + fontFamily: '"Open Sans", Arial, sans-serif', + '& h1, & h2, & h3': { + fontFamily: '"Montserrat", "Helvetica Neue", Arial, sans-serif', + color: '#667eea', + fontWeight: 600, + marginBottom: 2, + }, + '& h1': { + fontSize: '1.5rem', + }, + '& h2': { + fontSize: '1.25rem', + }, + '& h3': { + fontSize: '1.1rem', + }, + '& p, & li': { + lineHeight: 1.6, + marginBottom: 1, + color: '#444444', + }, + '& strong': { + color: '#764ba2', + fontWeight: 600, + }, + '& ul': { + paddingLeft: 3, + }, + } as SxProps, + color: { + primary: '#667eea', + secondary: '#764ba2', + accent: '#f093fb', + text: '#444444', + background: '#ffffff', + }, + }, + corporate: { + name: 'Corporate', + description: 'Formal, structured business format', + headerStyle: { + display: 'flex', + flexDirection: 'row', + fontFamily: '"Arial", sans-serif', + border: '2px solid #34495e', + padding: 2.5, + 
marginBottom: 3, + backgroundColor: '#ecf0f1', + } as SxProps, + footerStyle: { + fontFamily: '"Arial", sans-serif', + border: '2px solid #34495e', + backgroundColor: '#ecf0f1', + paddingTop: 2, + display: 'flex', + flexDirection: 'column', + alignItems: 'center', + justifyContent: 'center', + textTransform: 'uppercase', + alignContent: 'center', + fontSize: '0.8rem', + pb: 2, + mb: 2, + } as SxProps, + contentStyle: { + fontFamily: '"Arial", sans-serif', + lineHeight: 1.4, + color: '#2c3e50', + } as SxProps, + markdownStyle: { + fontFamily: '"Arial", sans-serif', + '& h1, & h2, & h3': { + fontFamily: '"Arial", sans-serif', + color: '#34495e', + fontWeight: 'bold', + textTransform: 'uppercase', + fontSize: '0.875rem', + letterSpacing: '1px', + marginBottom: 1.5, + borderBottom: '1px solid #bdc3c7', + paddingBottom: 0.5, + }, + '& h1': { + fontSize: '1rem', + }, + '& h2': { + fontSize: '0.875rem', + }, + '& h3': { + fontSize: '0.75rem', + }, + '& p, & li': { + lineHeight: 1.4, + marginBottom: 0.75, + fontSize: '0.75rem', + }, + '& ul': { + paddingLeft: 2, + }, + } as SxProps, + color: { + primary: '#34495e', + secondary: '#2c3e50', + accent: '#95a5a6', + text: '#2c3e50', + background: '#ffffff', + }, + }, + }; +}; + +export const resumeStyles: Record = generateResumeStyles(); + +// Styled Header Component +interface StyledHeaderProps { + candidate: Types.Candidate; + style: ResumeStyle; +} + +const StyledHeader: React.FC = ({ candidate, style }) => { + const phone = parsePhoneNumberFromString(candidate.phone || '', 'US'); + + return ( + + + + + {candidate.fullName} + + + + {candidate.description && ( + + + {candidate.description} + + + )} + + + {candidate.email && ( + + + + {candidate.email} + + + )} + + {phone?.isValid() && ( + + + + {phone.formatInternational()} + + + )} + + {candidate.location && ( + + + + {candidate.location.city + ? 
`${candidate.location.city}, ${candidate.location.state}` + : candidate.location.text} + + + )} + + + + + ); +}; + +// Styled Footer Component +interface StyledFooterProps { + candidate: Types.Candidate; + job?: Types.Job; + style: ResumeStyle; +} + +const StyledFooter: React.FC = ({ candidate, job, style }) => { + return ( + <> + + Dive deeper into my qualifications at Backstory... + + {candidate?.username + ? `${window.location.protocol}://${window.location.host}/u/${candidate?.username}` + : 'backstory'} + +   + + ); +}; + +// Main ResumePreview Component +export interface ResumePreviewProps { + resume: Types.Resume; + selectedStyle?: string; + shadeMargins?: boolean; +} + +export const ResumePreview: React.FC = (props: ResumePreviewProps) => { + const { resume, selectedStyle = 'corporate', shadeMargins = true } = props; + const currentStyle = resumeStyles[selectedStyle] || resumeStyles.corporate; + const job: Types.Job | null = resume.job || null; + const candidate: Types.Candidate | null = resume.candidate || null; + + if (!resume || !candidate || !job) { + return ( + + No resume data available. 
+ + ); + } + + return ( + + + {/* Custom Header */} + + + {/* Styled Markdown Content */} + + + + + {/* QR Code Footer */} + {job && } + + + ); +}; + +export default ResumePreview; diff --git a/frontend/src/pages/CandidateChatPage.tsx b/frontend/src/pages/CandidateChatPage.tsx index acb9307..ab61cf9 100644 --- a/frontend/src/pages/CandidateChatPage.tsx +++ b/frontend/src/pages/CandidateChatPage.tsx @@ -17,7 +17,7 @@ import { ConversationHandle } from 'components/Conversation'; import { Message } from 'components/Message'; import { DeleteConfirmation } from 'components/DeleteConfirmation'; import { CandidateInfo } from 'components/ui/CandidateInfo'; -import { useAppState, useSelectedCandidate } from 'hooks/GlobalContext'; +import { useAppState, useSelectedCandidate, useSelectedJob } from 'hooks/GlobalContext'; import PropagateLoader from 'react-spinners/PropagateLoader'; import { BackstoryTextField, BackstoryTextFieldRef } from 'components/BackstoryTextField'; import { BackstoryQuery } from 'components/BackstoryQuery'; @@ -53,6 +53,11 @@ const defaultMessage: ChatMessage = { metadata: emptyMetadata, }; +const defaultQuestion: CandidateQuestion = { + question: + 'How well does the resume align with the job description? 
What are the three key strengths and two greatest weaknesses?', +}; + interface CandidateChatPageProps { sx?: SxProps; // Optional styles for the component } @@ -60,10 +65,11 @@ interface CandidateChatPageProps { const CandidateChatPage = forwardRef( (props: CandidateChatPageProps, ref): JSX.Element => { const { resumeId } = useParams<{ resumeId?: string }>(); + const { selectedJob, setSelectedJob } = useSelectedJob(); const [resume, setResume] = useState(null); const { sx } = props; const { apiClient } = useAuth(); - const { selectedCandidate } = useSelectedCandidate(); + const { selectedCandidate, setSelectedCandidate } = useSelectedCandidate(); const [processingMessage, setProcessingMessage] = useState< ChatMessageStatus | ChatMessageError | null >(null); @@ -84,6 +90,12 @@ const CandidateChatPage = forwardRef .getResume(resumeId) .then(resume => { setResume(resume); + if (resume.candidate && resume.candidate !== selectedCandidate) { + setSelectedCandidate(resume.candidate); + } + if (resume.job && resume.job !== selectedJob) { + setSelectedJob(resume.job); + } }) .catch(error => { console.error('Failed to load resume:', error); @@ -121,9 +133,9 @@ const CandidateChatPage = forwardRef type: 'text', timestamp: new Date(), extraContext: { - candidateId: resume?.job?.id, - jobId: resume?.job?.id || '', - resumeId: resume?.id || '', + candidateId: resume?.candidate?.id, + jobId: resume?.job?.id, + resumeId: resume?.id, }, }; @@ -152,14 +164,12 @@ const CandidateChatPage = forwardRef onError: (error: string | ChatMessageError): void => { console.log('onError:', error); // Type-guard to determine if this is a ChatMessageBase or a string - if (typeof error === 'object' && error !== null && 'content' in error) { - setProcessingMessage(error); + if (typeof error === 'object' && error !== null && 'error' in error) { + setSnack(`Error: ${error.error}`, 'error'); + } else if (typeof error === 'string') { + setSnack(`Error: ${error}`, 'error'); } else { - 
setProcessingMessage({ - ...defaultMessage, - status: 'error', - content: error, - }); + setSnack(`An unknown error occurred: ${JSON.stringify(error)}`, 'error'); } setStreaming(false); }, @@ -251,9 +261,9 @@ const CandidateChatPage = forwardRef content: `Welcome to the Backstory Chat about ${selectedCandidate.fullName}` + (resume && ` and the ${resume.job?.title} position at ${resume.job?.company}`) + - `. Ask any questions you have about ${selectedCandidate.firstName}'${ + `. Enter any questions you have about ${selectedCandidate.firstName}'${ selectedCandidate.firstName.slice(-1) !== 's' ? 's' : '' - } resume or skills.`, + } resume or skills, or select from the available questions.`, metadata: emptyMetadata, }; @@ -376,14 +386,14 @@ const CandidateChatPage = forwardRef
)} - {selectedCandidate.questions?.length !== 0 && ( - - {' '} - {selectedCandidate.questions?.map((q, i) => ( - - ))} - - )} + + {selectedCandidate.questions?.map((q, i) => ( + + ))} + {resume && ( + + )} + {/* Fixed Message Input */} ; + extraContext?: ExtraChatContext; tunables?: Tunables; metadata: ChatMessageMetaData; } @@ -350,7 +350,7 @@ export interface ChatMessageResume { timestamp?: Date; role: "user" | "assistant" | "system" | "information" | "warning" | "error"; content: string; - extraContext?: Record; + extraContext?: ExtraChatContext; tunables?: Tunables; metadata: ChatMessageMetaData; resume: Resume; @@ -365,7 +365,7 @@ export interface ChatMessageSkillAssessment { timestamp?: Date; role: "user" | "assistant" | "system" | "information" | "warning" | "error"; content: string; - extraContext?: Record; + extraContext?: ExtraChatContext; tunables?: Tunables; metadata: ChatMessageMetaData; skillAssessment: SkillAssessment; @@ -401,7 +401,7 @@ export interface ChatMessageUser { timestamp?: Date; role: "user" | "assistant" | "system" | "information" | "warning" | "error"; content: string; - extraContext?: Record; + extraContext?: ExtraChatContext; tunables?: Tunables; } @@ -616,6 +616,14 @@ export interface EvidenceDetail { context: string; } +export interface ExtraChatContext { + jobId?: string; + candidateId?: string; + resumeId?: string; + resume?: string; + isAnswer?: boolean; +} + export interface GPUInfo { name: string; memory: number; @@ -1009,7 +1017,7 @@ export interface ResumeMessage { timestamp?: Date; role: "user" | "assistant" | "system" | "information" | "warning" | "error"; content: string; - extraContext?: Record; + extraContext?: ExtraChatContext; tunables?: Tunables; resume: Resume; } diff --git a/src/backend/agents/base.py b/src/backend/agents/base.py index 71939fb..3906949 100644 --- a/src/backend/agents/base.py +++ b/src/backend/agents/base.py @@ -27,6 +27,7 @@ import defines from logger import logger from models import ( ChatResponse, 
+ ExtraChatContext, Tunables, ChatMessageUser, ChatMessage, @@ -644,7 +645,7 @@ Content: {content} session_id: str, prompt: str, database: RedisDatabase, - extra_context: Optional[dict[str, str | int | float | bool]] = None, + extra_context: Optional[ExtraChatContext] = None, tunables: Optional[Tunables] = None, temperature=0.7, ) -> AsyncGenerator[ApiMessage, None]: @@ -693,10 +694,6 @@ Content: {content} rag_message = message context = self.get_rag_context(rag_message) - if extra_context: - # Add extra context to the messages if provided - context = f"{context}\n\n".join(f"<{key}>\n{value}" for key, value in extra_context.items()) - # Add the RAG context to the messages if available if context: messages.append( diff --git a/src/backend/agents/candidate_chat.py b/src/backend/agents/candidate_chat.py index a43d4dc..75bca4a 100644 --- a/src/backend/agents/candidate_chat.py +++ b/src/backend/agents/candidate_chat.py @@ -7,7 +7,7 @@ from database.core import RedisDatabase from .base import Agent, agent_registry from logger import logger -from models import ApiMessage, Tunables, ApiStatusType +from models import ApiMessage, ExtraChatContext, Tunables, ApiStatusType system_message = """ @@ -41,7 +41,7 @@ class CandidateChat(Agent): session_id: str, prompt: str, database: RedisDatabase, - extra_context: Optional[dict[str, str | int | float | bool]] = None, + extra_context: Optional[ExtraChatContext] = None, tunables: Optional[Tunables] = None, temperature=0.7, ) -> AsyncGenerator[ApiMessage, None]: diff --git a/src/backend/agents/edit_resume.py b/src/backend/agents/edit_resume.py index 77f4b26..f4e557f 100644 --- a/src/backend/agents/edit_resume.py +++ b/src/backend/agents/edit_resume.py @@ -19,6 +19,7 @@ from models import ( ChatMessageStreaming, ChatMessageUser, ChatOptions, + ExtraChatContext, Tunables, ApiStatusType, UsageStats, @@ -41,7 +42,7 @@ class EditResume(Agent): session_id: str, prompt: str, database: RedisDatabase, - extra_context: Optional[dict[str, str | 
int | float | bool]] = None, + extra_context: Optional[ExtraChatContext] = None, tunables: Optional[Tunables] = None, temperature=0.2, ): @@ -74,16 +75,9 @@ class EditResume(Agent): context = None rag_message: ChatMessageRagSearch | None = None if self.user: - logger.info("Generating resume enhanced RAG results") - rag_prompt = "" - if extra_context: - # Add extra context to the messages if provided - rag_prompt = f"{context}\n\n".join(f"<{key}>\n{value}" for key, value in extra_context.items()) - rag_prompt += f"\n\nPrompt to respond to:\n{prompt}\n" - else: - rag_prompt = prompt + logger.info("Generating results") message = None - async for message in self.generate_rag_results(session_id=session_id, prompt=rag_prompt, top_k=10): + async for message in self.generate_rag_results(session_id=session_id, prompt=prompt, top_k=10): if message.status == ApiStatusType.ERROR: yield message return @@ -97,16 +91,17 @@ class EditResume(Agent): rag_message = message context = self.get_rag_context(rag_message) - if extra_context: + if extra_context and extra_context.resume: # Add extra context to the messages if provided - context = f"{context}\n\n".join(f"<{key}>\n{value}" for key, value in extra_context.items()) - + context = f"\n{context}\n\n\n\n{extra_context.resume}" + elif context: + context = "\n{context}\n" # Add the RAG context to the messages if available if context: messages.append( LLMMessage( role="user", - content=f"<|context|>\nThe following is context information about {self.user.full_name}:\n{context}\n\n\nPrompt to respond to:\n{prompt}\n", + content=f"{context}\n\nPrompt to respond to:\n{prompt}\n", ) ) else: @@ -197,7 +192,7 @@ class EditResume(Agent): session_id: str, prompt: str, database: RedisDatabase, - extra_context: Optional[dict[str, str | int | float | bool]] = None, + extra_context: Optional[ExtraChatContext] = None, tunables: Optional[Tunables] = None, temperature=0.2, ) -> AsyncGenerator[ApiMessage, None]: @@ -209,7 +204,7 @@ class 
EditResume(Agent): You are a professional copy editor. Your task is to edit and enhance the provided resume content based on the requested edits. **CRITICAL: NEVER INVENT OR FABRICATE ANY INFORMATION** -- DO NOT create any metrics, percentages, dollar amounts, timeframes, or statistics that are not explicitly stated in the original resume or <|context|> +- DO NOT create any metrics, percentages, dollar amounts, timeframes, or statistics that are not explicitly stated in the original resume or - DO NOT add quantitative claims like "increased by X%", "reduced by X hours", "saved $X", "improved by X%" unless these exact figures are provided - DO NOT estimate, approximate, or infer numerical data @@ -225,7 +220,7 @@ You are a professional copy editor. Your task is to edit and enhance the provide - You are provided the current resume content in the section - Only make edits that are requested by the user - Do not add any additional information that is not present in the original resume -- Only add factual information supported by <|context|> or the content +- Only add factual information supported by or the content - DO NOT make assumptions about the candidate's experience or skills **For impact summaries specifically:** @@ -250,7 +245,7 @@ If the user did not ask a question, return the entire resume with the requested database=database, temperature=temperature, tunables=tunables, - extra_context=extra_context or {}, + extra_context=extra_context, ): if message.status == ApiStatusType.ERROR: yield message diff --git a/src/backend/agents/generate_image.py b/src/backend/agents/generate_image.py index d281a78..30f6fce 100644 --- a/src/backend/agents/generate_image.py +++ b/src/backend/agents/generate_image.py @@ -21,6 +21,7 @@ from models import ( ChatMessageStatus, ChatMessageStreaming, ApiStatusType, + ExtraChatContext, Tunables, ) from logger import logger @@ -47,7 +48,7 @@ class ImageGenerator(Agent): session_id: str, prompt: str, database: RedisDatabase, - 
extra_context: Optional[dict[str, str | int | float | bool]] = None, + extra_context: Optional[ExtraChatContext] = None, tunables: Optional[Tunables] = None, temperature=0.7, ) -> AsyncGenerator[ChatMessage | ChatMessageStatus | ChatMessageError | ChatMessageStreaming, None]: diff --git a/src/backend/agents/generate_persona.py b/src/backend/agents/generate_persona.py index 1363075..ea23dbf 100644 --- a/src/backend/agents/generate_persona.py +++ b/src/backend/agents/generate_persona.py @@ -28,6 +28,7 @@ from models import ( ChatMessageStatus, ChatMessageStreaming, ApiStatusType, + ExtraChatContext, Tunables, ) from logger import logger @@ -311,7 +312,7 @@ class GeneratePersona(Agent): session_id: str, prompt: str, database: RedisDatabase, - extra_context: Optional[dict[str, str | int | float | bool]] = None, + extra_context: Optional[ExtraChatContext] = None, tunables: Optional[Tunables] = None, temperature=0.7, ) -> AsyncGenerator[ChatMessage | ChatMessageStatus | ChatMessageError | ChatMessageStreaming, None]: diff --git a/src/backend/agents/job_requirements.py b/src/backend/agents/job_requirements.py index 922b192..e98e878 100644 --- a/src/backend/agents/job_requirements.py +++ b/src/backend/agents/job_requirements.py @@ -22,6 +22,7 @@ from models import ( ChatMessageStatus, ChatMessageStreaming, ApiStatusType, + ExtraChatContext, Job, JobRequirements, JobRequirementsMessage, @@ -163,7 +164,7 @@ Avoid vague categorizations and be precise about whether skills are explicitly r session_id: str, prompt: str, database: RedisDatabase, - extra_context: Optional[dict[str, str | int | float | bool]] = None, + extra_context: Optional[ExtraChatContext] = None, tunables: Optional[Tunables] = None, temperature=0.7, ) -> AsyncGenerator[ApiMessage, None]: diff --git a/src/backend/agents/rag_search.py b/src/backend/agents/rag_search.py index d1b1f0d..14c801f 100644 --- a/src/backend/agents/rag_search.py +++ b/src/backend/agents/rag_search.py @@ -6,7 +6,7 @@ from database.core 
import RedisDatabase from .base import Agent, agent_registry from logger import logger -from models import ApiMessage, ApiStatusType, ChatMessageError, ChatMessageRagSearch, Tunables +from models import ApiMessage, ApiStatusType, ChatMessageError, ChatMessageRagSearch, ExtraChatContext, Tunables class RagSearchChat(Agent): @@ -24,7 +24,7 @@ class RagSearchChat(Agent): session_id: str, prompt: str, database: RedisDatabase, - extra_context: Optional[dict[str, str | int | float | bool]] = None, + extra_context: Optional[ExtraChatContext] = None, tunables: Optional[Tunables] = None, temperature=0.7, ) -> AsyncGenerator[ApiMessage, None]: diff --git a/src/backend/agents/resume_chat.py b/src/backend/agents/resume_chat.py new file mode 100644 index 0000000..56b61f4 --- /dev/null +++ b/src/backend/agents/resume_chat.py @@ -0,0 +1,291 @@ +from __future__ import annotations +import time +from typing import List, Literal, AsyncGenerator, ClassVar, Optional, Any + + +from database.core import RedisDatabase + +from .base import Agent, agent_registry +from logger import logger + +from models import ( + ApiActivityType, + ApiMessage, + Candidate, + ChatMessage, + ChatMessageError, + ChatMessageMetaData, + ChatMessageRagSearch, + ChatMessageStatus, + ChatMessageStreaming, + ChatMessageUser, + ChatOptions, + ExtraChatContext, + Job, + Resume, + Tunables, + ApiStatusType, + UsageStats, + LLMMessage, +) + + +class ResumeChat(Agent): + """ + ResumeChat Agent + """ + + agent_type: Literal["resume_chat"] = "resume_chat" # type: ignore + _agent_type: ClassVar[str] = agent_type # Add this for registration + + async def resume_chat( + self, + llm: Any, + model: str, + session_id: str, + prompt: str, + database: RedisDatabase, + extra_context: Optional[ExtraChatContext] = None, + tunables: Optional[Tunables] = None, + temperature=0.2, + ): + if not self.user: + error_message = ChatMessageError(session_id=session_id, content="No user set for chat generation.") + yield error_message + return 
+ + if not extra_context or not extra_context.job_id or not extra_context.candidate_id or not extra_context.resume_id: + error_message = ChatMessageError( + session_id=session_id, + content="Missing required extra context: job_id, candidate_id, or resume_id.", + ) + yield error_message + return + + logger.info(f"Generating resume_chat response to: {prompt} with context: {extra_context.model_dump(exclude_none=True)}") + + job_data = await database.get_job(extra_context.job_id) + job = Job.model_validate(job_data) if job_data else None + if not job: + error_message = ChatMessageError( + session_id=session_id, content=f"Job with ID {extra_context.job_id} not found." + ) + yield error_message + return + candidate_data = await database.get_candidate(extra_context.candidate_id) + candidate = Candidate.model_validate(candidate_data) if candidate_data else None + if not candidate: + error_message = ChatMessageError( + session_id=session_id, content=f"Candidate with ID {extra_context.candidate_id} not found." + ) + yield error_message + return + resume_data = await database.get_resume(user_id=candidate.id, resume_id=extra_context.resume_id) + resume = Resume.model_validate(resume_data) if resume_data else None + if not resume: + error_message = ChatMessageError( + session_id=session_id, content=f"Resume with ID {extra_context.resume_id} not found." 
+            )
+            yield error_message
+            return
+
+        user_message = ChatMessageUser(
+            session_id=session_id,
+            content=prompt,
+        )
+
+        await database.add_chat_message(session_id, user_message.model_dump())
+        logger.info(f"💬 User message saved to database for session {session_id}")
+
+        # Create a pruned down message list based purely on the prompt and responses,
+        # discarding the full preamble generated by prepare_message
+        messages: List[LLMMessage] = [LLMMessage(role="system", content=self.system_prompt)]
+        # Add the conversation history to the messages
+        messages.extend(
+            [
+                LLMMessage(role=m["role"], content=m["content"])
+                for m in await database.get_recent_chat_messages(session_id=session_id)
+            ]
+        )
+
+        self.user.metrics.generate_count.labels(agent=self.agent_type).inc()
+        with self.user.metrics.generate_duration.labels(agent=self.agent_type).time():
+            rag_context = None
+            rag_message: ChatMessageRagSearch | None = None
+            if self.user:
+                logger.info("Generating RAG results")
+                rag_prompt = prompt
+                message = None
+                async for message in self.generate_rag_results(session_id=session_id, prompt=rag_prompt, top_k=10):
+                    if message.status == ApiStatusType.ERROR:
+                        yield message
+                        return
+                    # Only yield messages that are in a streaming state
+                    if message.status == ApiStatusType.STATUS:
+                        yield message
+
+                if not isinstance(message, ChatMessageRagSearch):
+                    raise ValueError(f"Expected ChatMessageRagSearch, got {type(message)}")
+
+                rag_message = message
+                rag_context = self.get_rag_context(rag_message)
+
+            context = f"""
+
+The following is the job description for the position {job.title} at {job.company}:
+{job.description}
+
+
+
+The following is the resume of {candidate.full_name}:
+{resume.resume}
+
+
+
+{candidate.model_dump(include={"first_name", "last_name", "full_name", "email", "location"}, exclude_none=True)}
+
+"""
+            if rag_context:
+                context = f"""
+
+The following is context information about {candidate.full_name} based on the prompt:
+{rag_context}
+
+
+{context} +""" + + messages.append( + LLMMessage( + role="user", + content=f"{context}\n\nPrompt to respond to:\n{prompt}\n", + ) + ) + + # not use_tools + status_message = ChatMessageStatus( + session_id=session_id, activity=ApiActivityType.GENERATING, content="Generating response..." + ) + yield status_message + + # Set the response for streaming + self.set_optimal_context_size(llm, model, prompt=prompt) + + options = ChatOptions( + seed=8911, + num_ctx=self.context_size, + temperature=temperature, + ) + logger.info(f"Message options: {options.model_dump(exclude_unset=True)} with {len(messages)} messages") + content = "" + start_time = time.perf_counter() + response = None + async for response in llm.chat_stream( + model=model, + messages=messages, + options={ + **options.model_dump(exclude_unset=True), + }, + stream=True, + ): + if not response: + error_message = ChatMessageError(session_id=session_id, content="No response from LLM.") + yield error_message + return + + content += response.content + + if not response.finish_reason: + streaming_message = ChatMessageStreaming( + session_id=session_id, + content=response.content, + ) + yield streaming_message + + if not response: + error_message = ChatMessageError(session_id=session_id, content="No response from LLM.") + yield error_message + return + + self.user.collect_metrics(agent=self, response=response) + end_time = time.perf_counter() + + chat_message = ChatMessage( + session_id=session_id, + tunables=tunables, + status=ApiStatusType.DONE, + content=content, + metadata=ChatMessageMetaData( + options=options, + usage=UsageStats( + eval_count=response.usage.eval_count, + eval_duration=response.usage.eval_duration, + prompt_eval_count=response.usage.prompt_eval_count, + prompt_eval_duration=response.usage.prompt_eval_duration, + ), + rag_results=rag_message.content if rag_message else [], + llm_history=messages, + timers={ + "llm_streamed": end_time - start_time, + "llm_with_tools": 0, # Placeholder for tool 
processing time
+                },
+            ),
+        )
+
+        await database.add_chat_message(session_id, chat_message.model_dump())
+        logger.info(f"🤖 Assistant response saved to database for session {session_id}")
+
+        # Add the user and chat messages to the conversation
+        yield chat_message
+        return
+
+    async def generate(
+        self,
+        llm: Any,
+        model: str,
+        session_id: str,
+        prompt: str,
+        database: RedisDatabase,
+        extra_context: Optional[ExtraChatContext] = None,
+        tunables: Optional[Tunables] = None,
+        temperature=0.2,
+    ) -> AsyncGenerator[ApiMessage, None]:
+        user = self.user
+        if not user:
+            logger.error("User is not set for Resume Chat agent.")
+            raise ValueError("User must be set before generating resume chat responses.")
+        self.system_prompt = """
+You are a professional staffing agent. Your task is to answer questions about a candidate based on their and .
+
+**CRITICAL: NEVER INVENT OR FABRICATE ANY INFORMATION**
+- DO NOT create any metrics, percentages, dollar amounts, timeframes, or statistics that are not explicitly stated in the or
+- DO NOT add quantitative claims like "increased by X%", "reduced by X hours", "saved $X", "improved by X%" unless these exact figures are provided
+- DO NOT estimate, approximate, or infer numerical data
+
+**Guidelines:**
+- You are provided the current resume about the candidate in the section
+- You are provided additional context about the question in the section
+- You are provided the candidate's name and other details in the section
+- You are provided the job description in the section
+
+- You must use the provided information to answer questions about the candidate's skills, experience, and qualifications in relation to the job description. 
+""" + + async for message in self.resume_chat( + llm=llm, + model=model, + session_id=session_id, + prompt=prompt, + database=database, + temperature=temperature, + tunables=tunables, + extra_context=extra_context, + ): + if message.status == ApiStatusType.ERROR: + yield message + return + yield message + + +# Register the base agent +agent_registry.register(ResumeChat._agent_type, ResumeChat) diff --git a/src/backend/agents/skill_match.py b/src/backend/agents/skill_match.py index 3cc3e4a..4fda726 100644 --- a/src/backend/agents/skill_match.py +++ b/src/backend/agents/skill_match.py @@ -22,6 +22,7 @@ from models import ( ApiStatusType, ChatMessageStatus, EvidenceDetail, + ExtraChatContext, SkillAssessment, Tunables, ) @@ -116,7 +117,7 @@ JSON RESPONSE:""" session_id: str, prompt: str, database: RedisDatabase, - extra_context: Optional[dict[str, str | int | float | bool]] = None, + extra_context: Optional[ExtraChatContext] = None, tunables: Optional[Tunables] = None, temperature=0.7, ) -> AsyncGenerator[ApiMessage, None]: diff --git a/src/backend/models.py b/src/backend/models.py index 9d25ce4..748eeea 100644 --- a/src/backend/models.py +++ b/src/backend/models.py @@ -1169,13 +1169,22 @@ class SkillMatchRequest(BaseModel): skill: str regenerate: bool = Field(default=False, description="Whether to regenerate the skill match even if cached") +class ExtraChatContext(BaseModel): + """Extra context for chat messages""" + + job_id: Optional[str] = Field(default=None, alias=str("jobId")) + candidate_id: Optional[str] = Field(default=None, alias=str("candidateId")) + resume_id: Optional[str] = Field(default=None, alias=str("resumeId")) + resume: Optional[str] = Field(default=None) + is_answer: Optional[bool] = Field(default=None, alias=str("isAnswer")) + model_config = ConfigDict(populate_by_name=True) class ChatMessageUser(ApiMessage): type: ApiMessageType = ApiMessageType.TEXT status: ApiStatusType = ApiStatusType.DONE role: ChatSenderType = ChatSenderType.USER content: 
str = "" - extra_context: Optional[Dict[str, str | int | float | bool]] = Field(default=None, alias=str("extraContext")) + extra_context: Optional[ExtraChatContext] = Field(default=None, alias=str("extraContext")) tunables: Optional[Tunables] = None diff --git a/src/backend/utils/helpers.py b/src/backend/utils/helpers.py index d000a45..677027f 100644 --- a/src/backend/utils/helpers.py +++ b/src/backend/utils/helpers.py @@ -87,7 +87,7 @@ async def stream_agent_response( ): if generated_message.status == ApiStatusType.ERROR: logger.error(f"❌ AI generation error: {generated_message.content}") - yield f"data: {json.dumps({'status': 'error'})}\n\n" + yield f"data: {json.dumps({'status': 'error', 'error': generated_message.content})}\n\n" return # Store reference to the complete AI message