Added interactive resume chat with agentic backend

This commit is contained in:
James Ketr 2025-07-16 17:03:38 -07:00
parent 574d040492
commit 2ac5f5f078
15 changed files with 1062 additions and 61 deletions

View File

@ -0,0 +1,155 @@
/* A4 Portrait simulation for MuiMarkdown */

/* Force the document's typography onto every MUI Typography node inside the page. */
.a4-document .MuiTypography-root {
  font-family: 'Roboto', 'Times New Roman', serif;
}

/* Optional page-break shading: a repeating vertical gradient paints the
   12mm top/bottom margin band of each 297mm-tall "page" in light gray. */
.a4-document.with-margins {
  background-image:
    repeating-linear-gradient(
      #ddd,
      #ddd 12mm,
      transparent calc(12mm + 1px),
      transparent calc(285mm - 1px), /* 297mm page height - 12mm bottom margin */
      #ddd calc(285mm),
      #ddd 297mm
    );
  background-size: 100% 297mm; /* one gradient tile per A4 page */
  background-repeat: repeat-y;
}

.a4-document {
  /* display: flex; */
  /* position: relative; */
  /* A4 dimensions: 210mm x 297mm */
  width: 210mm;
  min-height: 297mm;
  /* Alternative pixel-based approach (96 DPI) */
  /* width: 794px; */
  /* height: 1123px; */
  /* Document styling */
  background: white;
  padding: 12mm; /* 12mm (~0.5in) margins all around — matches the shading band above */
  box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
  border: 1px solid #e0e0e0;
  /* Typography for document feel */
  font-family: 'Roboto', 'Times New Roman', serif;
  font-size: 12pt;
  line-height: 1.6;
  color: #333;
  /* Ensure proper page breaks for printing */
  page-break-after: always;
  /* Prevent content overflow */
  box-sizing: border-box;
  /* overflow: hidden; */
}

/* Container to center the document */
.document-container {
  display: flex;
  position: relative;
  justify-content: center;
  min-height: fit-content;
  height: fit-content;
  background-color: #f5f5f5; /* Light gray background for contrast */
  padding: 20px 0;
}

/* Responsive adjustments: below 900px, scale the page to the viewport. */
@media screen and (max-width: 900px) {
  .a4-document {
    width: 95vw;
    height: auto;
    min-height: 134vw; /* Maintains A4 aspect ratio (297/210 ≈ 1.414) */
    margin: 10px auto;
    padding: 6vw;
  }
}

/* Print styles: strip screen-only chrome (shadow, border, gray backdrop). */
@media print {
  .document-container {
    background: none;
    padding: 0mm !important;
    margin: 0mm !important;
  }
  .a4-document {
    width: 210mm;
    margin: 0;
    padding: 0;
    box-shadow: none;
    border: none;
    page-break-after: always;
  }
}

/* Additional MuiMarkdown specific adjustments: compress heading sizes so
   markdown headings read like resume section titles, not article headings. */
.a4-document h1,
.a4-document h2,
.a4-document h3,
.a4-document h4,
.a4-document h5,
.a4-document h6 {
  font-size: 1em;
  margin-top: 0.25em;
  margin-bottom: 0.25em;
}

/* Put after above so they take precedence */
.a4-document h1,
.a4-document h2 {
  font-size: 1.1em;
}

.a4-document p {
  margin-bottom: 1em;
  text-align: justify;
}

.a4-document ul,
.a4-document ol {
  margin-bottom: 1em;
  padding-left: 2em;
}

.a4-document blockquote {
  margin: 1em 0;
  padding-left: 1em;
  border-left: 3px solid #ccc;
  font-style: italic;
}

.a4-document code {
  background-color: #f5f5f5;
  padding: 0.2em 0.4em;
  border-radius: 3px;
  font-family: 'Courier New', monospace;
}

.a4-document pre {
  background-color: #f5f5f5;
  padding: 1em;
  border-radius: 5px;
  overflow-x: auto;
  margin: 1em 0;
}

/* Header row rendered by StyledHeader in ResumePreview. */
.BackstoryResumeHeader {
  gap: 1rem;
  display: flex;
  /* flex-direction: column; */
}

.BackstoryResumeHeader p {
  /* border: 3px solid purple; */
  margin: 0 !important;
}

View File

@ -0,0 +1,532 @@
import React from 'react';
import { Box, Typography, SxProps, Theme } from '@mui/material';
import {
Email as EmailIcon,
Phone as PhoneIcon,
LocationOn as LocationIcon,
} from '@mui/icons-material';
import { parsePhoneNumberFromString } from 'libphonenumber-js';
import { StyledMarkdown } from 'components/StyledMarkdown';
import * as Types from 'types/types';
import './ResumePreview.css';
// Resume Style Definitions

/**
 * A complete visual theme for a rendered resume: MUI `sx` fragments for each
 * page region plus a named color palette used by header/footer components.
 */
export interface ResumeStyle {
  name: string; // Capitalized display name, e.g. 'Classic', 'Creative'
  description: string; // One-line blurb describing the style
  headerStyle: SxProps<Theme>; // Applied to the header container
  footerStyle: SxProps<Theme>; // Applied to the footer container
  contentStyle: SxProps<Theme>; // Applied to the page content wrapper
  markdownStyle: SxProps<Theme>; // Applied to the rendered markdown body
  color: {
    primary: string; // Headings / primary emphasis
    secondary: string; // Sub-headings / secondary text
    accent: string; // Icons and highlights
    text: string; // Body text
    background: string; // Page background
  };
}
/**
 * Builds the catalog of available resume visual styles, keyed by style id
 * ('classic' | 'modern' | 'creative' | 'corporate').
 *
 * NOTE(review): each entry's `name` is a capitalized display name
 * ('Classic', 'Modern', ...). Code that branches on the active style should
 * compare against these exact strings (or the record key), not lowercase
 * variants.
 */
const generateResumeStyles = (): Record<string, ResumeStyle> => {
  return {
    // Traditional serif look with rule lines above/below header and footer.
    classic: {
      name: 'Classic',
      description: 'Traditional, professional serif design',
      headerStyle: {
        display: 'flex',
        flexDirection: 'row',
        fontFamily: '"Times New Roman", Times, serif',
        borderBottom: '2px solid #2c3e50',
        paddingBottom: 2,
        marginBottom: 3,
      } as SxProps<Theme>,
      footerStyle: {
        fontFamily: '"Times New Roman", Times, serif',
        borderTop: '2px solid #2c3e50',
        paddingTop: 2,
        display: 'flex',
        flexDirection: 'column',
        alignItems: 'center',
        justifyContent: 'center',
        textTransform: 'uppercase',
        alignContent: 'center',
        fontSize: '0.8rem',
        pb: 2,
        mb: 2,
      } as SxProps<Theme>,
      contentStyle: {
        fontFamily: '"Times New Roman", Times, serif',
        lineHeight: 1.6,
        color: '#2c3e50',
      } as SxProps<Theme>,
      markdownStyle: {
        fontFamily: '"Times New Roman", Times, serif',
        '& h1, & h2, & h3': {
          fontFamily: '"Times New Roman", Times, serif',
          color: '#2c3e50',
          borderBottom: '1px solid #bdc3c7',
          paddingBottom: 1,
          marginBottom: 2,
        },
        '& p, & li': {
          lineHeight: 1.6,
          marginBottom: 1,
        },
        '& ul': {
          paddingLeft: 3,
        },
      } as SxProps<Theme>,
      color: {
        primary: '#2c3e50',
        secondary: '#34495e',
        accent: '#3498db',
        text: '#2c3e50',
        background: '#ffffff',
      },
    },
    // Minimalist sans-serif look with a blue accent bar on header/footer.
    modern: {
      name: 'Modern',
      description: 'Clean, minimalist sans-serif layout',
      headerStyle: {
        display: 'flex',
        flexDirection: 'row',
        fontFamily: '"Helvetica Neue", Helvetica, Arial, sans-serif',
        borderLeft: '4px solid #3498db',
        paddingLeft: 2,
        marginBottom: 3,
        backgroundColor: '#f8f9fa',
        padding: 2,
        borderRadius: 1,
      } as SxProps<Theme>,
      footerStyle: {
        fontFamily: '"Helvetica Neue", Helvetica, Arial, sans-serif',
        borderLeft: '4px solid #3498db',
        backgroundColor: '#f8f9fa',
        paddingTop: 2,
        borderRadius: 1,
        display: 'flex',
        flexDirection: 'column',
        alignItems: 'center',
        justifyContent: 'center',
        textTransform: 'uppercase',
        alignContent: 'center',
        fontSize: '0.8rem',
        pb: 2,
        mb: 2,
      } as SxProps<Theme>,
      contentStyle: {
        fontFamily: '"Helvetica Neue", Helvetica, Arial, sans-serif',
        lineHeight: 1.5,
        color: '#2c3e50',
      } as SxProps<Theme>,
      markdownStyle: {
        fontFamily: '"Helvetica Neue", Helvetica, Arial, sans-serif',
        '& h1, & h2, & h3': {
          fontFamily: '"Helvetica Neue", Helvetica, Arial, sans-serif',
          color: '#3498db',
          fontWeight: 300,
          marginBottom: 1.5,
        },
        '& h1': {
          fontSize: '1.75rem',
        },
        '& h2': {
          fontSize: '1.5rem',
        },
        '& h3': {
          fontSize: '1.25rem',
        },
        '& p, & li': {
          lineHeight: 1.5,
          marginBottom: 0.75,
        },
        '& ul': {
          paddingLeft: 2.5,
        },
      } as SxProps<Theme>,
      color: {
        primary: '#3498db',
        secondary: '#2c3e50',
        accent: '#e74c3c',
        text: '#2c3e50',
        background: '#ffffff',
      },
    },
    // Purple-gradient header/footer; header text must be rendered white for
    // contrast (see StyledHeader's creative-style handling).
    creative: {
      name: 'Creative',
      description: 'Colorful, unique design with personality',
      headerStyle: {
        display: 'flex',
        flexDirection: 'row',
        fontFamily: '"Montserrat", "Helvetica Neue", Arial, sans-serif',
        background: 'linear-gradient(135deg, #667eea 0%, #764ba2 100%)',
        color: '#ffffff',
        padding: 2.5,
        borderRadius: 1.5,
        marginBottom: 3,
      } as SxProps<Theme>,
      footerStyle: {
        fontFamily: '"Montserrat", "Helvetica Neue", Arial, sans-serif',
        background: 'linear-gradient(135deg, #667eea 0%, #764ba2 100%)',
        color: '#ffffff',
        paddingTop: 2,
        borderRadius: 1.5,
        display: 'flex',
        flexDirection: 'column',
        alignItems: 'center',
        justifyContent: 'center',
        textTransform: 'uppercase',
        alignContent: 'center',
        fontSize: '0.8rem',
        pb: 2,
        mb: 2,
      } as SxProps<Theme>,
      contentStyle: {
        fontFamily: '"Open Sans", Arial, sans-serif',
        lineHeight: 1.6,
        color: '#444444',
      } as SxProps<Theme>,
      markdownStyle: {
        fontFamily: '"Open Sans", Arial, sans-serif',
        '& h1, & h2, & h3': {
          fontFamily: '"Montserrat", "Helvetica Neue", Arial, sans-serif',
          color: '#667eea',
          fontWeight: 600,
          marginBottom: 2,
        },
        '& h1': {
          fontSize: '1.5rem',
        },
        '& h2': {
          fontSize: '1.25rem',
        },
        '& h3': {
          fontSize: '1.1rem',
        },
        '& p, & li': {
          lineHeight: 1.6,
          marginBottom: 1,
          color: '#444444',
        },
        '& strong': {
          color: '#764ba2',
          fontWeight: 600,
        },
        '& ul': {
          paddingLeft: 3,
        },
      } as SxProps<Theme>,
      color: {
        primary: '#667eea',
        secondary: '#764ba2',
        accent: '#f093fb',
        text: '#444444',
        background: '#ffffff',
      },
    },
    // Boxed, small-caps business format; default style for ResumePreview.
    corporate: {
      name: 'Corporate',
      description: 'Formal, structured business format',
      headerStyle: {
        display: 'flex',
        flexDirection: 'row',
        fontFamily: '"Arial", sans-serif',
        border: '2px solid #34495e',
        padding: 2.5,
        marginBottom: 3,
        backgroundColor: '#ecf0f1',
      } as SxProps<Theme>,
      footerStyle: {
        fontFamily: '"Arial", sans-serif',
        border: '2px solid #34495e',
        backgroundColor: '#ecf0f1',
        paddingTop: 2,
        display: 'flex',
        flexDirection: 'column',
        alignItems: 'center',
        justifyContent: 'center',
        textTransform: 'uppercase',
        alignContent: 'center',
        fontSize: '0.8rem',
        pb: 2,
        mb: 2,
      } as SxProps<Theme>,
      contentStyle: {
        fontFamily: '"Arial", sans-serif',
        lineHeight: 1.4,
        color: '#2c3e50',
      } as SxProps<Theme>,
      markdownStyle: {
        fontFamily: '"Arial", sans-serif',
        '& h1, & h2, & h3': {
          fontFamily: '"Arial", sans-serif',
          color: '#34495e',
          fontWeight: 'bold',
          textTransform: 'uppercase',
          fontSize: '0.875rem',
          letterSpacing: '1px',
          marginBottom: 1.5,
          borderBottom: '1px solid #bdc3c7',
          paddingBottom: 0.5,
        },
        '& h1': {
          fontSize: '1rem',
        },
        '& h2': {
          fontSize: '0.875rem',
        },
        '& h3': {
          fontSize: '0.75rem',
        },
        '& p, & li': {
          lineHeight: 1.4,
          marginBottom: 0.75,
          fontSize: '0.75rem',
        },
        '& ul': {
          paddingLeft: 2,
        },
      } as SxProps<Theme>,
      color: {
        primary: '#34495e',
        secondary: '#2c3e50',
        accent: '#95a5a6',
        text: '#2c3e50',
        background: '#ffffff',
      },
    },
  };
};

// Singleton style catalog built once at module load.
export const resumeStyles: Record<string, ResumeStyle> = generateResumeStyles();
// Styled Header Component
// Props for the resume header: the candidate whose contact info is shown
// and the active visual style.
interface StyledHeaderProps {
  candidate: Types.Candidate;
  style: ResumeStyle;
}

/**
 * Resume page header: candidate name, short description, and contact
 * details (email, phone, location), laid out per the selected ResumeStyle.
 *
 * The Creative style renders the header over a dark gradient, so all
 * header text and icons are forced to white; every other style uses its
 * own palette colors.
 */
const StyledHeader: React.FC<StyledHeaderProps> = ({ candidate, style }) => {
  // Parse with a US default region so bare national numbers still validate.
  const phone = parsePhoneNumberFromString(candidate.phone || '', 'US');
  // BUG FIX: `style.name` holds capitalized display names ('Creative'), so
  // the previous lowercase comparison (style.name === 'creative') never
  // matched and the white-on-gradient override was dead code. Compare
  // case-insensitively and hoist the derived colors once.
  const isCreative = style.name.toLowerCase() === 'creative';
  const primaryColor = isCreative ? '#ffffff' : style.color.primary;
  const secondaryColor = isCreative ? '#ffffff' : style.color.secondary;
  const accentColor = isCreative ? '#ffffff' : style.color.accent;
  const textColor = isCreative ? '#ffffff' : style.color.text;
  return (
    <Box className="BackstoryResumeHeader" sx={style.headerStyle}>
      <Box sx={{ display: 'flex', flexDirection: 'column', flexGrow: 1 }}>
        <Box sx={{ display: 'flex' }}>
          <Typography
            variant="h4"
            sx={{
              fontWeight: 'bold',
              mb: 1,
              color: primaryColor,
              fontFamily: 'inherit',
            }}
          >
            {candidate.fullName}
          </Typography>
        </Box>
        <Box
          sx={{
            display: 'flex',
            flexDirection: 'row',
            alignItems: 'flex-start',
            gap: 1,
          }}
        >
          {candidate.description && (
            <Box sx={{ display: 'flex' }}>
              <Typography
                variant="h6"
                sx={{
                  mb: 2,
                  fontWeight: 300,
                  color: secondaryColor,
                  fontFamily: 'inherit',
                  fontSize: '0.8rem !important',
                }}
              >
                {candidate.description}
              </Typography>
            </Box>
          )}
          <Box
            sx={{
              display: 'flex',
              flexDirection: 'column',
              flexWrap: 'wrap',
              alignContent: 'center',
              flexGrow: 1,
              minWidth: 'fit-content',
              gap: 1,
            }}
          >
            {candidate.email && (
              <Box sx={{ display: 'flex', alignItems: 'center', m: 0, p: 0 }}>
                <EmailIcon fontSize="small" sx={{ mr: 1, color: accentColor }} />
                <Typography
                  variant="body2"
                  sx={{ color: textColor, fontFamily: 'inherit' }}
                >
                  {candidate.email}
                </Typography>
              </Box>
            )}
            {phone?.isValid() && (
              <Box sx={{ display: 'flex', alignItems: 'center' }}>
                <PhoneIcon fontSize="small" sx={{ mr: 1, color: accentColor }} />
                <Typography
                  variant="body2"
                  sx={{ color: textColor, fontFamily: 'inherit' }}
                >
                  {phone.formatInternational()}
                </Typography>
              </Box>
            )}
            {candidate.location && (
              <Box sx={{ display: 'flex', alignItems: 'center' }}>
                <LocationIcon fontSize="small" sx={{ mr: 1, color: accentColor }} />
                <Typography
                  variant="body2"
                  sx={{ color: textColor, fontFamily: 'inherit' }}
                >
                  {candidate.location.city
                    ? `${candidate.location.city}, ${candidate.location.state}`
                    : candidate.location.text}
                </Typography>
              </Box>
            )}
          </Box>
        </Box>
      </Box>
    </Box>
  );
};
// Styled Footer Component
interface StyledFooterProps {
  candidate: Types.Candidate; // Candidate the QR code / profile link points at
  job?: Types.Job; // Optional job; its id is embedded in the QR-code deep link
  style: ResumeStyle;
}

/**
 * Resume footer: a call-to-action line, a QR code deep-linking to the
 * candidate (and optionally the job) on Backstory, and the candidate's
 * profile URL (or the literal 'backstory' when no username exists).
 */
const StyledFooter: React.FC<StyledFooterProps> = ({ candidate, job, style }) => {
  // BUG FIX: window.location.protocol already ends with ':' (e.g. "https:"),
  // so the previous template `${protocol}://${host}` rendered "https:://host".
  // Join with "//" only.
  const profileUrl = candidate?.username
    ? `${window.location.protocol}//${window.location.host}/u/${candidate.username}`
    : 'backstory';
  return (
    <>
      <Box
        className="BackstoryResumeFooter"
        sx={{
          ...style.footerStyle,
          color: style.color.secondary,
        }}
      >
        Dive deeper into my qualifications at Backstory...
        <Box
          component="img"
          src={`/api/1.0/candidates/qr-code/${candidate.id || ''}/${(job && job.id) || ''}`}
          alt="QR Code"
          className="qr-code"
          sx={{ display: 'flex', mt: 1, mb: 1 }}
        />
        {profileUrl}
      </Box>
      {/* Spacer so the footer is not flush against the page bottom. */}
      <Box sx={{ pb: 2 }}>&nbsp;</Box>
    </>
  );
};
// Main ResumePreview Component
export interface ResumePreviewProps {
  resume: Types.Resume; // Resume (with embedded candidate and job) to render
  selectedStyle?: string; // Key into resumeStyles; defaults to 'corporate'
  shadeMargins?: boolean; // When true, shades page-break bands (see ResumePreview.css)
}

/**
 * Renders a resume as a simulated A4 page: a styled header (candidate
 * contact info), the resume markdown body, and a QR-code footer, all themed
 * by the selected ResumeStyle. Shows a placeholder message when the resume
 * is missing its candidate or job.
 */
export const ResumePreview: React.FC<ResumePreviewProps> = (props: ResumePreviewProps) => {
  const { resume, selectedStyle = 'corporate', shadeMargins = true } = props;
  // Unknown style keys fall back to the corporate style.
  const currentStyle = resumeStyles[selectedStyle] || resumeStyles.corporate;
  // BUG FIX: guard before dereferencing — the previous code read resume.job /
  // resume.candidate ahead of the `!resume` check, so a null resume passed at
  // runtime (despite the typing) would throw before the guard ever ran.
  const job: Types.Job | null = resume?.job || null;
  const candidate: Types.Candidate | null = resume?.candidate || null;
  if (!resume || !candidate || !job) {
    return (
      <Box sx={{ p: 2 }}>
        <Typography variant="body1">No resume data available.</Typography>
      </Box>
    );
  }
  return (
    <Box
      className="document-container"
      sx={{
        ...currentStyle.contentStyle,
        position: 'relative',
        height: 'fit-content',
        minHeight: 'fit-content',
        display: 'flex',
        m: 0,
        p: 0,
      }}
    >
      <Box
        className={`a4-document ${shadeMargins ? 'with-margins' : ''}`}
        sx={{
          backgroundColor: currentStyle.color.background,
          // NOTE(review): sx padding (theme units) competes with the CSS
          // `padding: 12mm` on .a4-document — confirm which should win.
          padding: 5,
          minHeight: '100vh',
          height: 'fit-content',
          boxShadow: '0 0 10px rgba(0,0,0,0.1)',
          display: 'flex',
          flexDirection: 'column',
        }}
      >
        {/* Custom Header */}
        <StyledHeader candidate={candidate} style={currentStyle} />
        {/* Styled Markdown Content (markdownStyle applied on the wrapper and
            again on StyledMarkdown so nested selectors reach rendered nodes) */}
        <Box sx={currentStyle.markdownStyle}>
          <StyledMarkdown
            sx={{
              position: 'relative',
              maxHeight: '100%',
              display: 'flex',
              flexGrow: 1,
              flex: 1,
              ...currentStyle.markdownStyle,
            }}
            content={resume.resume}
          />
        </Box>
        {/* QR Code Footer (job is guaranteed non-null here by the guard above) */}
        {job && <StyledFooter candidate={candidate} job={job} style={currentStyle} />}
      </Box>
    </Box>
  );
};
export default ResumePreview;

View File

@ -17,7 +17,7 @@ import { ConversationHandle } from 'components/Conversation';
import { Message } from 'components/Message';
import { DeleteConfirmation } from 'components/DeleteConfirmation';
import { CandidateInfo } from 'components/ui/CandidateInfo';
import { useAppState, useSelectedCandidate } from 'hooks/GlobalContext';
import { useAppState, useSelectedCandidate, useSelectedJob } from 'hooks/GlobalContext';
import PropagateLoader from 'react-spinners/PropagateLoader';
import { BackstoryTextField, BackstoryTextFieldRef } from 'components/BackstoryTextField';
import { BackstoryQuery } from 'components/BackstoryQuery';
@ -53,6 +53,11 @@ const defaultMessage: ChatMessage = {
metadata: emptyMetadata,
};
// Canned question offered in the chat UI when a resume is loaded, prompting
// a resume-vs-job-description analysis.
const defaultQuestion: CandidateQuestion = {
  question:
    'How well does the resume align with the job description? What are the three key strengths and two greatest weaknesses?',
};
interface CandidateChatPageProps {
sx?: SxProps; // Optional styles for the component
}
@ -60,10 +65,11 @@ interface CandidateChatPageProps {
const CandidateChatPage = forwardRef<ConversationHandle, CandidateChatPageProps>(
(props: CandidateChatPageProps, ref): JSX.Element => {
const { resumeId } = useParams<{ resumeId?: string }>();
const { selectedJob, setSelectedJob } = useSelectedJob();
const [resume, setResume] = useState<Resume | null>(null);
const { sx } = props;
const { apiClient } = useAuth();
const { selectedCandidate } = useSelectedCandidate();
const { selectedCandidate, setSelectedCandidate } = useSelectedCandidate();
const [processingMessage, setProcessingMessage] = useState<
ChatMessageStatus | ChatMessageError | null
>(null);
@ -84,6 +90,12 @@ const CandidateChatPage = forwardRef<ConversationHandle, CandidateChatPageProps>
.getResume(resumeId)
.then(resume => {
setResume(resume);
if (resume.candidate && resume.candidate !== selectedCandidate) {
setSelectedCandidate(resume.candidate);
}
if (resume.job && resume.job !== selectedJob) {
setSelectedJob(resume.job);
}
})
.catch(error => {
console.error('Failed to load resume:', error);
@ -121,9 +133,9 @@ const CandidateChatPage = forwardRef<ConversationHandle, CandidateChatPageProps>
type: 'text',
timestamp: new Date(),
extraContext: {
candidateId: resume?.job?.id,
jobId: resume?.job?.id || '',
resumeId: resume?.id || '',
candidateId: resume?.candidate?.id,
jobId: resume?.job?.id,
resumeId: resume?.id,
},
};
@ -152,14 +164,12 @@ const CandidateChatPage = forwardRef<ConversationHandle, CandidateChatPageProps>
onError: (error: string | ChatMessageError): void => {
console.log('onError:', error);
// Type-guard to determine if this is a ChatMessageBase or a string
if (typeof error === 'object' && error !== null && 'content' in error) {
setProcessingMessage(error);
if (typeof error === 'object' && error !== null && 'error' in error) {
setSnack(`Error: ${error.error}`, 'error');
} else if (typeof error === 'string') {
setSnack(`Error: ${error}`, 'error');
} else {
setProcessingMessage({
...defaultMessage,
status: 'error',
content: error,
});
setSnack(`An unknown error occurred: ${JSON.stringify(error)}`, 'error');
}
setStreaming(false);
},
@ -251,9 +261,9 @@ const CandidateChatPage = forwardRef<ConversationHandle, CandidateChatPageProps>
content:
`Welcome to the Backstory Chat about ${selectedCandidate.fullName}` +
(resume && ` and the ${resume.job?.title} position at ${resume.job?.company}`) +
`. Ask any questions you have about ${selectedCandidate.firstName}'${
`. Enter any questions you have about ${selectedCandidate.firstName}'${
selectedCandidate.firstName.slice(-1) !== 's' ? 's' : ''
} resume or skills.`,
} resume or skills, or select from the available questions.`,
metadata: emptyMetadata,
};
@ -376,14 +386,14 @@ const CandidateChatPage = forwardRef<ConversationHandle, CandidateChatPageProps>
<div ref={messagesEndRef} />
</Scrollable>
)}
{selectedCandidate.questions?.length !== 0 && (
<Box sx={{ display: 'flex', flexDirection: 'row', gap: 1, p: 1, flex: 0 }}>
{' '}
{selectedCandidate.questions?.map((q, i) => (
<BackstoryQuery key={i} question={q} submitQuery={handleSubmitQuestion} />
))}
</Box>
)}
<Box sx={{ display: 'flex', flexDirection: 'row', gap: 1, p: 1, flex: 0 }}>
{selectedCandidate.questions?.map((q, i) => (
<BackstoryQuery key={i} question={q} submitQuery={handleSubmitQuestion} />
))}
{resume && (
<BackstoryQuery question={defaultQuestion} submitQuery={handleSubmitQuestion} />
)}
</Box>
{/* Fixed Message Input */}
<Box sx={{ display: 'flex', gap: 1 }}>
<DeleteConfirmation

View File

@ -1,6 +1,6 @@
// Generated TypeScript types from Pydantic models
// Source: src/backend/models.py
// Generated on: 2025-07-16T21:30:37.986984
// Generated on: 2025-07-16T23:28:16.752031
// DO NOT EDIT MANUALLY - This file is auto-generated
// ============================
@ -299,7 +299,7 @@ export interface ChatMessage {
timestamp?: Date;
role: "user" | "assistant" | "system" | "information" | "warning" | "error";
content: string;
extraContext?: Record<string, any>;
extraContext?: ExtraChatContext;
tunables?: Tunables;
metadata: ChatMessageMetaData;
}
@ -350,7 +350,7 @@ export interface ChatMessageResume {
timestamp?: Date;
role: "user" | "assistant" | "system" | "information" | "warning" | "error";
content: string;
extraContext?: Record<string, any>;
extraContext?: ExtraChatContext;
tunables?: Tunables;
metadata: ChatMessageMetaData;
resume: Resume;
@ -365,7 +365,7 @@ export interface ChatMessageSkillAssessment {
timestamp?: Date;
role: "user" | "assistant" | "system" | "information" | "warning" | "error";
content: string;
extraContext?: Record<string, any>;
extraContext?: ExtraChatContext;
tunables?: Tunables;
metadata: ChatMessageMetaData;
skillAssessment: SkillAssessment;
@ -401,7 +401,7 @@ export interface ChatMessageUser {
timestamp?: Date;
role: "user" | "assistant" | "system" | "information" | "warning" | "error";
content: string;
extraContext?: Record<string, any>;
extraContext?: ExtraChatContext;
tunables?: Tunables;
}
@ -616,6 +616,14 @@ export interface EvidenceDetail {
context: string;
}
// Optional per-message context forwarded to the agent backend, identifying
// which job/candidate/resume a chat message refers to.
// NOTE: this file is auto-generated from src/backend/models.py — regenerate
// rather than editing field shapes by hand.
export interface ExtraChatContext {
  jobId?: string; // ID of the job under discussion
  candidateId?: string; // ID of the candidate under discussion
  resumeId?: string; // ID of the stored resume
  resume?: string; // Raw resume content, when edits reference it — TODO confirm
  isAnswer?: boolean; // presumably marks the message as an answer, not a question — verify against backend
}
export interface GPUInfo {
name: string;
memory: number;
@ -1009,7 +1017,7 @@ export interface ResumeMessage {
timestamp?: Date;
role: "user" | "assistant" | "system" | "information" | "warning" | "error";
content: string;
extraContext?: Record<string, any>;
extraContext?: ExtraChatContext;
tunables?: Tunables;
resume: Resume;
}

View File

@ -27,6 +27,7 @@ import defines
from logger import logger
from models import (
ChatResponse,
ExtraChatContext,
Tunables,
ChatMessageUser,
ChatMessage,
@ -644,7 +645,7 @@ Content: {content}
session_id: str,
prompt: str,
database: RedisDatabase,
extra_context: Optional[dict[str, str | int | float | bool]] = None,
extra_context: Optional[ExtraChatContext] = None,
tunables: Optional[Tunables] = None,
temperature=0.7,
) -> AsyncGenerator[ApiMessage, None]:
@ -693,10 +694,6 @@ Content: {content}
rag_message = message
context = self.get_rag_context(rag_message)
if extra_context:
# Add extra context to the messages if provided
context = f"{context}\n\n".join(f"<{key}>\n{value}</{key}>" for key, value in extra_context.items())
# Add the RAG context to the messages if available
if context:
messages.append(

View File

@ -7,7 +7,7 @@ from database.core import RedisDatabase
from .base import Agent, agent_registry
from logger import logger
from models import ApiMessage, Tunables, ApiStatusType
from models import ApiMessage, ExtraChatContext, Tunables, ApiStatusType
system_message = """
@ -41,7 +41,7 @@ class CandidateChat(Agent):
session_id: str,
prompt: str,
database: RedisDatabase,
extra_context: Optional[dict[str, str | int | float | bool]] = None,
extra_context: Optional[ExtraChatContext] = None,
tunables: Optional[Tunables] = None,
temperature=0.7,
) -> AsyncGenerator[ApiMessage, None]:

View File

@ -19,6 +19,7 @@ from models import (
ChatMessageStreaming,
ChatMessageUser,
ChatOptions,
ExtraChatContext,
Tunables,
ApiStatusType,
UsageStats,
@ -41,7 +42,7 @@ class EditResume(Agent):
session_id: str,
prompt: str,
database: RedisDatabase,
extra_context: Optional[dict[str, str | int | float | bool]] = None,
extra_context: Optional[ExtraChatContext] = None,
tunables: Optional[Tunables] = None,
temperature=0.2,
):
@ -74,16 +75,9 @@ class EditResume(Agent):
context = None
rag_message: ChatMessageRagSearch | None = None
if self.user:
logger.info("Generating resume enhanced RAG results")
rag_prompt = ""
if extra_context:
# Add extra context to the messages if provided
rag_prompt = f"{context}\n\n".join(f"<{key}>\n{value}</{key}>" for key, value in extra_context.items())
rag_prompt += f"\n\nPrompt to respond to:\n{prompt}\n"
else:
rag_prompt = prompt
logger.info("Generating results")
message = None
async for message in self.generate_rag_results(session_id=session_id, prompt=rag_prompt, top_k=10):
async for message in self.generate_rag_results(session_id=session_id, prompt=prompt, top_k=10):
if message.status == ApiStatusType.ERROR:
yield message
return
@ -97,16 +91,17 @@ class EditResume(Agent):
rag_message = message
context = self.get_rag_context(rag_message)
if extra_context:
if extra_context and extra_context.resume:
# Add extra context to the messages if provided
context = f"{context}\n\n".join(f"<{key}>\n{value}</{key}>" for key, value in extra_context.items())
context = f"<context>\n{context}\n</context>\n\n<resume>\n{extra_context.resume}</resume>"
elif context:
context = "<context>\n{context}\n</context>"
# Add the RAG context to the messages if available
if context:
messages.append(
LLMMessage(
role="user",
content=f"<|context|>\nThe following is context information about {self.user.full_name}:\n{context}\n</|context|>\n\nPrompt to respond to:\n{prompt}\n",
content=f"{context}\n\nPrompt to respond to:\n{prompt}\n",
)
)
else:
@ -197,7 +192,7 @@ class EditResume(Agent):
session_id: str,
prompt: str,
database: RedisDatabase,
extra_context: Optional[dict[str, str | int | float | bool]] = None,
extra_context: Optional[ExtraChatContext] = None,
tunables: Optional[Tunables] = None,
temperature=0.2,
) -> AsyncGenerator[ApiMessage, None]:
@ -209,7 +204,7 @@ class EditResume(Agent):
You are a professional copy editor. Your task is to edit and enhance the provided resume content based on the requested edits.
**CRITICAL: NEVER INVENT OR FABRICATE ANY INFORMATION**
- DO NOT create any metrics, percentages, dollar amounts, timeframes, or statistics that are not explicitly stated in the original resume or <|context|>
- DO NOT create any metrics, percentages, dollar amounts, timeframes, or statistics that are not explicitly stated in the original resume or <context>
- DO NOT add quantitative claims like "increased by X%", "reduced by X hours", "saved $X", "improved by X%" unless these exact figures are provided
- DO NOT estimate, approximate, or infer numerical data
@ -225,7 +220,7 @@ You are a professional copy editor. Your task is to edit and enhance the provide
- You are provided the current resume content in the <resume> section
- Only make edits that are requested by the user
- Do not add any additional information that is not present in the original resume
- Only add factual information supported by <|context|> or the <resume> content
- Only add factual information supported by <context> or the <resume> content
- DO NOT make assumptions about the candidate's experience or skills
**For impact summaries specifically:**
@ -250,7 +245,7 @@ If the user did not ask a question, return the entire resume with the requested
database=database,
temperature=temperature,
tunables=tunables,
extra_context=extra_context or {},
extra_context=extra_context,
):
if message.status == ApiStatusType.ERROR:
yield message

View File

@ -21,6 +21,7 @@ from models import (
ChatMessageStatus,
ChatMessageStreaming,
ApiStatusType,
ExtraChatContext,
Tunables,
)
from logger import logger
@ -47,7 +48,7 @@ class ImageGenerator(Agent):
session_id: str,
prompt: str,
database: RedisDatabase,
extra_context: Optional[dict[str, str | int | float | bool]] = None,
extra_context: Optional[ExtraChatContext] = None,
tunables: Optional[Tunables] = None,
temperature=0.7,
) -> AsyncGenerator[ChatMessage | ChatMessageStatus | ChatMessageError | ChatMessageStreaming, None]:

View File

@ -28,6 +28,7 @@ from models import (
ChatMessageStatus,
ChatMessageStreaming,
ApiStatusType,
ExtraChatContext,
Tunables,
)
from logger import logger
@ -311,7 +312,7 @@ class GeneratePersona(Agent):
session_id: str,
prompt: str,
database: RedisDatabase,
extra_context: Optional[dict[str, str | int | float | bool]] = None,
extra_context: Optional[ExtraChatContext] = None,
tunables: Optional[Tunables] = None,
temperature=0.7,
) -> AsyncGenerator[ChatMessage | ChatMessageStatus | ChatMessageError | ChatMessageStreaming, None]:

View File

@ -22,6 +22,7 @@ from models import (
ChatMessageStatus,
ChatMessageStreaming,
ApiStatusType,
ExtraChatContext,
Job,
JobRequirements,
JobRequirementsMessage,
@ -163,7 +164,7 @@ Avoid vague categorizations and be precise about whether skills are explicitly r
session_id: str,
prompt: str,
database: RedisDatabase,
extra_context: Optional[dict[str, str | int | float | bool]] = None,
extra_context: Optional[ExtraChatContext] = None,
tunables: Optional[Tunables] = None,
temperature=0.7,
) -> AsyncGenerator[ApiMessage, None]:

View File

@ -6,7 +6,7 @@ from database.core import RedisDatabase
from .base import Agent, agent_registry
from logger import logger
from models import ApiMessage, ApiStatusType, ChatMessageError, ChatMessageRagSearch, Tunables
from models import ApiMessage, ApiStatusType, ChatMessageError, ChatMessageRagSearch, ExtraChatContext, Tunables
class RagSearchChat(Agent):
@ -24,7 +24,7 @@ class RagSearchChat(Agent):
session_id: str,
prompt: str,
database: RedisDatabase,
extra_context: Optional[dict[str, str | int | float | bool]] = None,
extra_context: Optional[ExtraChatContext] = None,
tunables: Optional[Tunables] = None,
temperature=0.7,
) -> AsyncGenerator[ApiMessage, None]:

View File

@ -0,0 +1,291 @@
from __future__ import annotations
import time
from typing import List, Literal, AsyncGenerator, ClassVar, Optional, Any
from database.core import RedisDatabase
from .base import Agent, agent_registry
from logger import logger
from models import (
ApiActivityType,
ApiMessage,
Candidate,
ChatMessage,
ChatMessageError,
ChatMessageMetaData,
ChatMessageRagSearch,
ChatMessageStatus,
ChatMessageStreaming,
ChatMessageUser,
ChatOptions,
ExtraChatContext,
Job,
Resume,
Tunables,
ApiStatusType,
UsageStats,
LLMMessage,
)
class ResumeChat(Agent):
"""
ResumeChat Agent
"""
agent_type: Literal["resume_chat"] = "resume_chat" # type: ignore
_agent_type: ClassVar[str] = agent_type # Add this for registration
async def resume_chat(
self,
llm: Any,
model: str,
session_id: str,
prompt: str,
database: RedisDatabase,
extra_context: Optional[ExtraChatContext] = None,
tunables: Optional[Tunables] = None,
temperature=0.2,
):
if not self.user:
error_message = ChatMessageError(session_id=session_id, content="No user set for chat generation.")
yield error_message
return
if not extra_context or not extra_context.job_id or not extra_context.candidate_id or not extra_context.resume_id:
error_message = ChatMessageError(
session_id=session_id,
content="Missing required extra context: job_id, candidate_id, or resume_id.",
)
yield error_message
return
logger.info(f"Generating resume_chat response to: {prompt} with context: {extra_context.model_dump(exclude_none=True)}")
job_data = await database.get_job(extra_context.job_id)
job = Job.model_validate(job_data) if job_data else None
if not job:
error_message = ChatMessageError(
session_id=session_id, content=f"Job with ID {extra_context.job_id} not found."
)
yield error_message
return
candidate_data = await database.get_candidate(extra_context.candidate_id)
candidate = Candidate.model_validate(candidate_data) if candidate_data else None
if not candidate:
error_message = ChatMessageError(
session_id=session_id, content=f"Candidate with ID {extra_context.candidate_id} not found."
)
yield error_message
return
resume_data = await database.get_resume(user_id=candidate.id, resume_id=extra_context.resume_id)
resume = Resume.model_validate(resume_data) if resume_data else None
if not resume:
error_message = ChatMessageError(
session_id=session_id, content=f"Resume with ID {extra_context.resume_id} not found."
)
yield error_message
return
user_message = ChatMessageUser(
session_id=session_id,
content=prompt,
)
await database.add_chat_message(session_id, user_message.model_dump())
logger.info(f"💬 User message saved to database for session {session_id}")
# Create a pruned down message list based purely on the prompt and responses,
# discarding the full preamble generated by prepare_message
messages: List[LLMMessage] = [LLMMessage(role="system", content=self.system_prompt)]
# Add the conversation history to the messages
messages.extend(
[
LLMMessage(role=m["role"], content=m["content"])
for m in await database.get_recent_chat_messages(session_id=session_id)
]
)
self.user.metrics.generate_count.labels(agent=self.agent_type).inc()
with self.user.metrics.generate_duration.labels(agent=self.agent_type).time():
rag_context = None
rag_message: ChatMessageRagSearch | None = None
if self.user:
logger.info("Generating RAG results")
rag_prompt = prompt
message = None
async for message in self.generate_rag_results(session_id=session_id, prompt=rag_prompt, top_k=10):
if message.status == ApiStatusType.ERROR:
yield message
return
# Only yield messages that are in a streaming state
if message.status == ApiStatusType.STATUS:
yield message
if not isinstance(message, ChatMessageRagSearch):
raise ValueError(f"Expected ChatMessageRagSearch, got {type(rag_message)}")
rag_message = message
rag_context = self.get_rag_context(rag_message)
context = f"""
<job>
The following is the job description for the position {job.title} at {job.company}:
{job.description}
</job>
<resume>
The following is the resume of {candidate.full_name}:
{resume.resume}
</resume>
<candidate>
{candidate.model_dump(include={"first_name", "last_name", "full_name", "email", "location"}, exclude_none=True)}
</candidate>
"""
if rag_context:
context = f"""
<context>
The following is context information about {candidate.full_name} based on the prompt:
{rag_context}
</context>
{context}
"""
messages.append(
LLMMessage(
role="user",
content=f"{context}\n\nPrompt to respond to:\n{prompt}\n",
)
)
# not use_tools
status_message = ChatMessageStatus(
session_id=session_id, activity=ApiActivityType.GENERATING, content="Generating response..."
)
yield status_message
# Set the response for streaming
self.set_optimal_context_size(llm, model, prompt=prompt)
options = ChatOptions(
seed=8911,
num_ctx=self.context_size,
temperature=temperature,
)
logger.info(f"Message options: {options.model_dump(exclude_unset=True)} with {len(messages)} messages")
content = ""
start_time = time.perf_counter()
response = None
async for response in llm.chat_stream(
model=model,
messages=messages,
options={
**options.model_dump(exclude_unset=True),
},
stream=True,
):
if not response:
error_message = ChatMessageError(session_id=session_id, content="No response from LLM.")
yield error_message
return
content += response.content
if not response.finish_reason:
streaming_message = ChatMessageStreaming(
session_id=session_id,
content=response.content,
)
yield streaming_message
if not response:
error_message = ChatMessageError(session_id=session_id, content="No response from LLM.")
yield error_message
return
self.user.collect_metrics(agent=self, response=response)
end_time = time.perf_counter()
chat_message = ChatMessage(
session_id=session_id,
tunables=tunables,
status=ApiStatusType.DONE,
content=content,
metadata=ChatMessageMetaData(
options=options,
usage=UsageStats(
eval_count=response.usage.eval_count,
eval_duration=response.usage.eval_duration,
prompt_eval_count=response.usage.prompt_eval_count,
prompt_eval_duration=response.usage.prompt_eval_duration,
),
rag_results=rag_message.content if rag_message else [],
llm_history=messages,
timers={
"llm_streamed": end_time - start_time,
"llm_with_tools": 0, # Placeholder for tool processing time
},
),
)
await database.add_chat_message(session_id, chat_message.model_dump())
logger.info(f"🤖 Assistent response saved to database for session {session_id}")
# Add the user and chat messages to the conversation
yield chat_message
return
async def generate(
    self,
    llm: Any,
    model: str,
    session_id: str,
    prompt: str,
    database: RedisDatabase,
    extra_context: Optional[ExtraChatContext] = None,
    tunables: Optional[Tunables] = None,
    temperature=0.2,
) -> AsyncGenerator[ApiMessage, None]:
    """Stream chat responses about a candidate in relation to a job posting.

    Installs the staffing-agent system prompt on this agent, then delegates
    the whole flow (history assembly, RAG lookup, LLM streaming, persistence)
    to ``resume_chat`` and re-yields every message it produces.

    Raises:
        ValueError: if no user is attached to this agent.
    """
    # A user must be attached before any chat generation can run.
    if not self.user:
        logger.error("User is not set for Edit Resume agent.")
        raise ValueError("User must be set before generating candidate chat responses.")

    # Runtime string: reproduced verbatim — it is part of the LLM contract.
    self.system_prompt = """
You are a professional staffing agent. Your task is to answer questions about a candidate based on their <resume> and <context>.
**CRITICAL: NEVER INVENT OR FABRICATE ANY INFORMATION**
- DO NOT create any metrics, percentages, dollar amounts, timeframes, or statistics that are not explicitly stated in the <resume> or <context>
- DO NOT add quantitative claims like "increased by X%", "reduced by X hours", "saved $X", "improved by X%" unless these exact figures are provided
- DO NOT estimate, approximate, or infer numerical data
**Guidelines:**
- You are provided the current resume about the candidate in the <resume> section
- You are provided additional context about the question in the <context> section
- You are provided the candidate's name and other details in the <candidate> section
- You are provided the job description in the <job> section
- You must use the provided information to answer questions about the candidate's skills, experience, and qualifications in relation to the job description.
"""

    stream = self.resume_chat(
        llm=llm,
        model=model,
        session_id=session_id,
        prompt=prompt,
        database=database,
        temperature=temperature,
        tunables=tunables,
        extra_context=extra_context,
    )
    async for chunk in stream:
        # Forward every message; stop immediately after the first error.
        yield chunk
        if chunk.status == ApiStatusType.ERROR:
            return
# Register this agent class under its declared agent type so the framework
# can instantiate ResumeChat by name at runtime.
agent_registry.register(ResumeChat._agent_type, ResumeChat)

View File

@ -22,6 +22,7 @@ from models import (
ApiStatusType,
ChatMessageStatus,
EvidenceDetail,
ExtraChatContext,
SkillAssessment,
Tunables,
)
@ -116,7 +117,7 @@ JSON RESPONSE:"""
session_id: str,
prompt: str,
database: RedisDatabase,
extra_context: Optional[dict[str, str | int | float | bool]] = None,
extra_context: Optional[ExtraChatContext] = None,
tunables: Optional[Tunables] = None,
temperature=0.7,
) -> AsyncGenerator[ApiMessage, None]:

View File

@ -1169,13 +1169,22 @@ class SkillMatchRequest(BaseModel):
skill: str
regenerate: bool = Field(default=False, description="Whether to regenerate the skill match even if cached")
class ExtraChatContext(BaseModel):
    """Extra context for chat messages.

    Aliases expose camelCase names to API clients; ``populate_by_name``
    allows either the snake_case field name or the alias on input.
    """

    # IDs of the job, candidate, and resume this chat session refers to.
    # Fix: aliases are plain string literals — the original wrapped each
    # in a redundant str(...) constructor call.
    job_id: Optional[str] = Field(default=None, alias="jobId")
    candidate_id: Optional[str] = Field(default=None, alias="candidateId")
    resume_id: Optional[str] = Field(default=None, alias="resumeId")
    # Optional inline resume text (no alias needed; name already matches).
    resume: Optional[str] = Field(default=None)
    # NOTE(review): presumably marks the message as an answer rather than a
    # question — semantics not visible in this file; confirm against callers.
    is_answer: Optional[bool] = Field(default=None, alias="isAnswer")

    model_config = ConfigDict(populate_by_name=True)
class ChatMessageUser(ApiMessage):
type: ApiMessageType = ApiMessageType.TEXT
status: ApiStatusType = ApiStatusType.DONE
role: ChatSenderType = ChatSenderType.USER
content: str = ""
extra_context: Optional[Dict[str, str | int | float | bool]] = Field(default=None, alias=str("extraContext"))
extra_context: Optional[ExtraChatContext] = Field(default=None, alias=str("extraContext"))
tunables: Optional[Tunables] = None

View File

@ -87,7 +87,7 @@ async def stream_agent_response(
):
if generated_message.status == ApiStatusType.ERROR:
logger.error(f"❌ AI generation error: {generated_message.content}")
yield f"data: {json.dumps({'status': 'error'})}\n\n"
yield f"data: {json.dumps({'status': 'error', 'error': generated_message.content})}\n\n"
return
# Store reference to the complete AI message