Job submission and parsing from doc working

This commit is contained in:
James Ketr 2025-06-05 14:25:57 -07:00
parent 48e6eeaa71
commit 504985a06b
15 changed files with 803 additions and 315 deletions

View File

@ -111,8 +111,9 @@ const DocumentManager = (props: BackstoryElementProps) => {
try {
// Upload file (replace with actual API call)
const newDocument = await apiClient.uploadCandidateDocument(file);
const controller = apiClient.uploadCandidateDocument(file, { includeInRAG: true, isJobDocument: false });
const newDocument = await controller.promise;
setDocuments(prev => [...prev, newDocument]);
setSnack(`Document uploaded: ${file.name}`, 'success');
@ -147,7 +148,7 @@ const DocumentManager = (props: BackstoryElementProps) => {
// Handle RAG flag toggle
const handleRAGToggle = async (document: Types.Document, includeInRAG: boolean) => {
try {
document.includeInRAG = includeInRAG;
document.options = { includeInRAG };
// Call API to update RAG flag
await apiClient.updateCandidateDocument(document);
@ -290,7 +291,7 @@ const DocumentManager = (props: BackstoryElementProps) => {
size="small"
color={getFileTypeColor(doc.type)}
/>
{doc.includeInRAG && (
{doc.options?.includeInRAG && (
<Chip
label="RAG"
size="small"
@ -309,7 +310,7 @@ const DocumentManager = (props: BackstoryElementProps) => {
<FormControlLabel
control={
<Switch
checked={doc.includeInRAG}
checked={doc.options?.includeInRAG}
onChange={(e) => handleRAGToggle(doc, e.target.checked)}
size="small"
/>

View File

@ -0,0 +1,292 @@
import React, { useState, useEffect, JSX } from 'react';
import {
Box,
Button,
Typography,
Paper,
TextField,
Grid,
InputAdornment,
Dialog,
DialogTitle,
DialogContent,
DialogContentText,
DialogActions,
IconButton,
useTheme,
useMediaQuery
} from '@mui/material';
import {
SyncAlt,
Favorite,
Settings,
Info,
Search,
AutoFixHigh,
Image,
Psychology,
Build
} from '@mui/icons-material';
import { styled } from '@mui/material/styles';
import DescriptionIcon from '@mui/icons-material/Description';
import FileUploadIcon from '@mui/icons-material/FileUpload';
import { useAuth } from 'hooks/AuthContext';
import { useSelectedCandidate, useSelectedJob } from 'hooks/GlobalContext';
import { BackstoryElementProps } from './BackstoryTab';
import { LoginRequired } from 'components/ui/LoginRequired';
import * as Types from 'types/types';
import { StreamingResponse } from 'services/api-client';
// Visually-hidden <input type="file">: clipped down to a 1px box and absolutely
// positioned so it remains in the DOM (and therefore focusable/clickable when
// nested inside a <Button component="label">) while being invisible on screen.
// This is the standard accessible "visually hidden" CSS pattern.
const VisuallyHiddenInput = styled('input')({
  clip: 'rect(0 0 0 0)',
  clipPath: 'inset(50%)',
  height: 1,
  overflow: 'hidden',
  position: 'absolute',
  bottom: 0,
  left: 0,
  whiteSpace: 'nowrap',
  width: 1,
});
const getIcon = (type: Types.ApiActivityType) => {
switch (type) {
case 'converting':
return <SyncAlt />;
case 'heartbeat':
return <Favorite />;
case 'system':
return <Settings />;
case 'info':
return <Info />;
case 'searching':
return <Search />;
case 'generating':
return <AutoFixHigh />;
case 'generating_image':
return <Image />;
case 'thinking':
return <Psychology />;
case 'tooling':
return <Build />;
default:
return <Info />; // fallback icon
}
}
/**
 * JobManagement
 *
 * Lets a logged-in user create a job either by uploading a job-description
 * document (.pdf, .docx, .txt, .md — parsed server-side, with streaming
 * progress events) or by entering the details manually. Streaming status from
 * the server is shown next to the upload button as an icon + message.
 *
 * Fixes vs previous revision: removed a dead empty useEffect and unused state
 * (openUploadDialog, jobId); handleSave now includes the collected `company`
 * field, is wired to a Save button (it was previously unreferenced), and no
 * longer leaves its streaming promise unhandled; completed the truncated
 * "You must " message.
 */
const JobManagement = (props: BackstoryElementProps) => {
  const { user, apiClient } = useAuth();
  const { selectedJob } = useSelectedJob();
  const { setSnack } = props;
  const theme = useTheme();
  const isMobile = useMediaQuery(theme.breakpoints.down('sm'));

  // Job form fields — populated by hand or from a parsed upload.
  const [jobDescription, setJobDescription] = useState<string>('');
  const [jobTitle, setJobTitle] = useState<string>('');
  const [company, setCompany] = useState<string>('');
  const [jobLocation, setJobLocation] = useState<string>('');

  // Streaming progress indicator (icon + message) for upload / job creation.
  const [jobStatus, setJobStatus] = useState<string>('');
  const [jobStatusIcon, setJobStatusIcon] = useState<JSX.Element>(<></>);

  // All hooks above run unconditionally; this early return is safe.
  if (!user?.id) {
    return (
      <LoginRequired asset="candidate analysis" />
    );
  }

  // Shared streaming callbacks for job creation.
  const jobStatusHandlers = {
    onStatus: (status: Types.ChatMessageStatus) => {
      setJobStatusIcon(getIcon(status.activity));
      setJobStatus(status.content);
    },
    onMessage: (job: Types.Job) => {
      console.log('onMessage - job', job);
      setJobDescription(job.description);
      setJobTitle(job.title || '');
    },
    onError: (error: Types.ChatMessageError) => {
      console.log('onError', error);
      setSnack(error.content, "error");
    },
    onComplete: () => {
      // Clear the progress indicator once the stream finishes.
      setJobStatusIcon(<></>);
      setJobStatus('');
    }
  };

  // Upload variant: the server streams back a message whose payload carries
  // the parsed job fields. NOTE(review): the Document -> Job cast assumes the
  // server returns job-shaped data for isJobDocument uploads — confirm against
  // the /candidates/documents/upload response.
  const documentStatusHandlers = {
    ...jobStatusHandlers,
    onMessage: (document: Types.Document) => {
      console.log('onMessage - document', document);
      const job: Types.Job = document as any;
      setJobDescription(job.description);
      setJobTitle(job.title || '');
    }
  };

  // Accepted file extensions mapped to their document types.
  const extensionToDocType: Record<string, Types.DocumentType> = {
    pdf: 'pdf',
    docx: 'docx',
    md: 'markdown',
    txt: 'txt',
  };

  // Upload a job-description file and stream parse progress into the form.
  const handleJobUpload = async (e: React.ChangeEvent<HTMLInputElement>) => {
    if (!e.target.files || !e.target.files[0]) {
      return;
    }
    const file = e.target.files[0];
    const extension = file.name.split('.').pop()?.toLowerCase() || '';
    const docType: Types.DocumentType | undefined = extensionToDocType[extension];
    if (!docType) {
      setSnack('Invalid file type. Please upload .txt, .md, .docx, or .pdf files only.', 'error');
      return;
    }
    try {
      const controller: StreamingResponse<Types.Document> = apiClient.uploadCandidateDocument(file, { isJobDocument: true }, documentStatusHandlers);
      const document: Types.Document | null = await controller.promise;
      if (!document) {
        return;
      }
      console.log(`Document id: ${document.id}`);
      // Reset the input so re-selecting the same file fires onChange again.
      e.target.value = '';
    } catch (error) {
      console.error(error);
      setSnack('Failed to upload document', 'error');
    }
  };

  // Persist the entered job. Streaming errors are surfaced to the user by
  // jobStatusHandlers.onError; the catch only prevents an unhandled rejection.
  const handleSave = async () => {
    const job: Types.Job = {
      ownerId: user?.id || '',
      ownerType: 'candidate',
      description: jobDescription,
      title: jobTitle,
      company, // previously collected but silently dropped
    };
    try {
      await apiClient.createJob(job, jobStatusHandlers).promise;
    } catch (error) {
      console.error(error);
    }
  };

  const renderJobCreation = () => {
    if (!user) {
      return <Box>You must be logged in to create a job.</Box>;
    }
    return (<>
      <Paper elevation={3} sx={{ p: 3, pt: 1, mt: 0, mb: 4, borderRadius: 2 }}>
        <Grid size={{ xs: 12 }}>
          <Box sx={{ display: 'flex', alignItems: 'flex-start', mt: 0, mb: 1, flexDirection: "column" }}>
            <Typography variant="subtitle1" sx={{ mr: 2 }}>
              Job Selection
            </Typography>
            <Box sx={{ display: "flex", flexDirection: "column" }}>
              <Button
                component="label"
                variant="contained"
                startIcon={<FileUploadIcon />}
                size={isMobile ? "small" : "medium"}>
                Upload
                <VisuallyHiddenInput
                  type="file"
                  accept=".txt,.md,.docx,.pdf"
                  onChange={handleJobUpload}
                />
              </Button>
              <Typography variant="caption">Accepted document formats: .pdf, .docx, .txt, or .md</Typography>
            </Box>
            <Box>{jobStatusIcon} {jobStatus}</Box>
          </Box>
          <TextField
            fullWidth
            multiline
            rows={12}
            placeholder="Enter the job description here..."
            variant="outlined"
            value={jobDescription}
            onChange={(e) => setJobDescription(e.target.value)}
            required
            InputProps={{
              startAdornment: (
                <InputAdornment position="start" sx={{ alignSelf: 'flex-start', mt: 1.5 }}>
                  <DescriptionIcon color="action" />
                </InputAdornment>
              ),
            }}
          />
          <Typography variant="caption" color="text.secondary" sx={{ mt: 1, display: 'block' }}>
            The job description will be used to extract requirements for candidate matching.
          </Typography>
        </Grid>
        <Typography variant="h5" gutterBottom>
          Enter Job Details
        </Typography>
        <Grid container spacing={3}>
          <Grid size={{ xs: 12, md: 6 }}>
            <TextField
              fullWidth
              label="Job Title"
              variant="outlined"
              value={jobTitle}
              onChange={(e) => setJobTitle(e.target.value)}
              required
              margin="normal"
            />
          </Grid>
          <Grid size={{ xs: 12, md: 6 }}>
            <TextField
              fullWidth
              label="Company"
              variant="outlined"
              value={company}
              onChange={(e) => setCompany(e.target.value)}
              required
              margin="normal"
            />
          </Grid>
          <Grid size={{ xs: 12, md: 6 }}>
            <TextField
              fullWidth
              label="Job Location"
              variant="outlined"
              value={jobLocation}
              onChange={(e) => setJobLocation(e.target.value)}
              margin="normal"
            />
          </Grid>
        </Grid>
        {/* Wire up the previously-dead handleSave handler. */}
        <Button
          variant="contained"
          onClick={handleSave}
          disabled={!jobTitle || !jobDescription}
          sx={{ mt: 2 }}>
          Save Job
        </Button>
      </Paper>
    </>);
  };

  return (
    <Box sx={{ display: "flex", flexDirection: isMobile ? "column" : "row", gap: 1, m: 0, p: 0 }}>
      {selectedJob === null && renderJobCreation()}
      {/* { selectedJob !== null && renderJob() } */}
    </Box>
  );
};
export { JobManagement };

View File

@ -0,0 +1,31 @@
import React from 'react';
import {
Button,
Typography,
Paper,
Container,
} from '@mui/material';
import { useNavigate } from 'react-router-dom';
interface LoginRequiredProps {
  // Human-readable name of the protected resource, interpolated into the prompt.
  asset: string;
}

/**
 * Centered card telling the user to log in before accessing `asset`.
 * The Log In button navigates to the /login route.
 */
const LoginRequired = ({ asset }: LoginRequiredProps) => {
  const navigate = useNavigate();
  const handleLogin = () => navigate('/login');
  return (
    <Container maxWidth="md">
      <Paper elevation={3} sx={{ p: 4, mt: 4, textAlign: 'center' }}>
        <Typography variant="h5" gutterBottom>
          Please log in to access {asset}
        </Typography>
        <Button variant="contained" color="primary" sx={{ mt: 2 }} onClick={handleLogin}>
          Log In
        </Button>
      </Paper>
    </Container>
  );
};
export { LoginRequired };

View File

@ -19,13 +19,6 @@ import {
useTheme,
Snackbar,
Alert,
Dialog,
DialogTitle,
DialogContent,
DialogContentText,
DialogActions,
InputAdornment,
IconButton
} from '@mui/material';
import SearchIcon from '@mui/icons-material/Search';
import PersonIcon from '@mui/icons-material/Person';
@ -38,9 +31,11 @@ import { Candidate } from "types/types";
import { useNavigate } from 'react-router-dom';
import { BackstoryPageProps } from 'components/BackstoryTab';
import { useAuth } from 'hooks/AuthContext';
import { useSelectedCandidate } from 'hooks/GlobalContext';
import { useSelectedCandidate, useSelectedJob } from 'hooks/GlobalContext';
import { CandidateInfo } from 'components/CandidateInfo';
import { ComingSoon } from 'components/ui/ComingSoon';
import { JobManagement } from 'components/JobManagement';
import { LoginRequired } from 'components/ui/LoginRequired';
// Main component
const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps) => {
@ -48,17 +43,13 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
const { user } = useAuth();
const navigate = useNavigate();
const { selectedCandidate, setSelectedCandidate } = useSelectedCandidate()
const { selectedJob, setSelectedJob } = useSelectedJob()
const { setSnack, submitQuery } = props;
const backstoryProps = { setSnack, submitQuery };
// State management
const [activeStep, setActiveStep] = useState(0);
const [jobDescription, setJobDescription] = useState('');
const [jobTitle, setJobTitle] = useState('');
const [company, setCompany] = useState('');
const [jobLocation, setJobLocation] = useState('');
const [analysisStarted, setAnalysisStarted] = useState(false);
const [error, setError] = useState<string | null>(null);
const [openUploadDialog, setOpenUploadDialog] = useState(false);
const { apiClient } = useAuth();
const [candidates, setCandidates] = useState<Candidate[] | null>(null);
@ -83,7 +74,6 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
}
return result;
});
console.log(candidates);
setCandidates(candidates);
} catch (err) {
setSnack("" + err);
@ -116,11 +106,9 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
return;
}
if (activeStep === 1) {
if (!jobDescription) {
setError('Please provide job description before continuing.');
return;
}
if (activeStep === 1 && !selectedJob) {
setError('Please select a job before continuing.');
return;
}
if (activeStep === 2) {
@ -138,7 +126,7 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
// setActiveStep(0);
setActiveStep(1);
// setSelectedCandidate(null);
setJobDescription('');
setSelectedJob(null);
// setJobTitle('');
// setJobLocation('');
setAnalysisStarted(false);
@ -227,93 +215,21 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
// Render function for the job description step
const renderJobDescription = () => (
<Paper elevation={3} sx={{ p: 3, mt: 3, mb: 4, borderRadius: 2 }}>
<Typography variant="h5" gutterBottom>
Enter Job Details
</Typography>
<Grid container spacing={3}>
<Grid size={{ xs: 12, md: 6 }}>
<TextField
fullWidth
label="Job Title"
variant="outlined"
value={jobTitle}
onChange={(e) => setJobTitle(e.target.value)}
required
margin="normal"
/>
</Grid>
<Grid size={{ xs: 12, md: 6 }}>
<TextField
fullWidth
label="Company"
variant="outlined"
value={company}
onChange={(e) => setCompany(e.target.value)}
required
margin="normal"
/>
</Grid>
<Grid size={{ xs: 12, md: 6 }}>
<TextField
fullWidth
label="Job Location"
variant="outlined"
value={jobLocation}
onChange={(e) => setJobLocation(e.target.value)}
margin="normal"
/>
</Grid>
</Grid>
<Grid size={{ xs: 12 }}>
<Box sx={{ display: 'flex', alignItems: 'center', mt: 2, mb: 1 }}>
<Typography variant="subtitle1" sx={{ mr: 2 }}>
Job Selection
</Typography>
<Button
variant="outlined"
startIcon={<FileUploadIcon />}
size="small"
onClick={() => setOpenUploadDialog(true)}
>
Upload
</Button>
</Box>
<TextField
fullWidth
multiline
rows={12}
placeholder="Enter the job description here..."
variant="outlined"
value={jobDescription}
onChange={(e) => setJobDescription(e.target.value)}
required
InputProps={{
startAdornment: (
<InputAdornment position="start" sx={{ alignSelf: 'flex-start', mt: 1.5 }}>
<DescriptionIcon color="action" />
</InputAdornment>
),
}}
/>
<Typography variant="caption" color="text.secondary" sx={{ mt: 1, display: 'block' }}>
The job description will be used to extract requirements for candidate matching.
</Typography>
</Grid>
</Paper>
<Box sx={{ mt: 3 }}>
{selectedCandidate && (
<JobManagement
{...backstoryProps}
/>
)}
</Box>
);
// Render function for the analysis step
const renderAnalysis = () => (
<Box sx={{ mt: 3 }}>
{selectedCandidate && (
{selectedCandidate && selectedJob && (
<JobMatchAnalysis
job={{ title: jobTitle, description: jobDescription, company: company, ownerId: user_id, ownerType: user_type }}
job={selectedJob}
candidate={selectedCandidate}
{...backstoryProps}
/>
@ -330,16 +246,7 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
// If no user is logged in, show message
if (!user?.id) {
return (
<Container maxWidth="md">
<Paper elevation={3} sx={{ p: 4, mt: 4, textAlign: 'center' }}>
<Typography variant="h5" gutterBottom>
Please log in to access candidate analysis
</Typography>
<Button variant="contained" onClick={() => { navigate('/login'); }} color="primary" sx={{ mt: 2 }}>
Log In
</Button>
</Paper>
</Container>
<LoginRequired asset="candidate analysis" />
);
}
@ -417,44 +324,7 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
{error}
</Alert>
</Snackbar>
{/* Upload Dialog */}
<Dialog open={openUploadDialog} onClose={() => setOpenUploadDialog(false)}>
<DialogTitle>Upload Job Description</DialogTitle>
<DialogContent>
<DialogContentText>
Upload a job description document (.pdf, .docx, .txt, or .md)
</DialogContentText>
<Box sx={{ mt: 2, textAlign: 'center' }}>
<Button
variant="outlined"
component="label"
startIcon={<FileUploadIcon />}
sx={{ mt: 1 }}
>
Choose File
<input
type="file"
hidden
accept=".pdf,.docx,.txt,.md"
onChange={() => {
// This would handle file upload in a real application
setOpenUploadDialog(false);
// Mock setting job description from file
setJobDescription(
"Senior Frontend Developer\n\nRequired Skills:\n- 5+ years of React development experience\n- Strong TypeScript skills\n- Experience with RESTful APIs\n- Knowledge of state management solutions (Redux, Context API)\n- Experience with CI/CD pipelines\n- Cloud platform experience (AWS, Azure, GCP)\n\nResponsibilities:\n- Develop and maintain frontend applications using React and TypeScript\n- Collaborate with backend developers to integrate APIs\n- Optimize applications for maximum speed and scalability\n- Design and implement new features and functionality\n- Ensure the technical feasibility of UI/UX designs"
);
setJobTitle("Senior Frontend Developer");
setJobLocation("Remote");
}}
/>
</Button>
</Box>
</DialogContent>
<DialogActions>
<Button onClick={() => setOpenUploadDialog(false)}>Cancel</Button>
</DialogActions>
</Dialog>
</Container>
);
};

View File

@ -19,7 +19,8 @@ import {
extractApiData,
// ApiResponse,
PaginatedResponse,
PaginatedRequest
PaginatedRequest,
toSnakeCase
} from 'types/conversion';
// Import generated date conversion functions
@ -33,17 +34,20 @@ import {
convertFromApi,
convertArrayFromApi
} from 'types/types';
import { json } from 'stream/consumers';
// ============================
// Streaming Types and Interfaces
// ============================
interface StreamingOptions {
interface StreamingOptions<T = Types.ChatMessage> {
method?: string,
headers?: Record<string, any>,
onStatus?: (status: Types.ChatMessageStatus) => void;
onMessage?: (message: Types.ChatMessage) => void;
onMessage?: (message: T) => void;
onStreaming?: (chunk: Types.ChatMessageStreaming) => void;
onComplete?: () => void;
onError?: (error: string | Types.ChatMessageError) => void;
onError?: (error: Types.ChatMessageError) => void;
onWarn?: (warning: string) => void;
signal?: AbortSignal;
}
@ -53,10 +57,10 @@ interface DeleteCandidateResponse {
message: string;
}
interface StreamingResponse {
interface StreamingResponse<T = Types.ChatMessage> {
messageId: string;
cancel: () => void;
promise: Promise<Types.ChatMessage[]>;
promise: Promise<T>;
}
interface CreateCandidateAIResponse {
@ -618,14 +622,9 @@ class ApiClient {
// Job Methods with Date Conversion
// ============================
async createJob(job: Omit<Types.Job, 'id' | 'datePosted' | 'views' | 'applicationCount'>): Promise<Types.Job> {
const response = await fetch(`${this.baseUrl}/jobs`, {
method: 'POST',
headers: this.defaultHeaders,
body: JSON.stringify(formatApiRequest(job))
});
return this.handleApiResponseWithConversion<Types.Job>(response, 'Job');
createJob(job: Omit<Types.Job, 'id' | 'datePosted' | 'views' | 'applicationCount'>, streamingOptions?: StreamingOptions<Types.Job>): StreamingResponse<Types.Job> {
const body = JSON.stringify(formatApiRequest(job));
return this.streamify<Types.Job>(`/jobs`, body, streamingOptions);
}
async getJob(id: string): Promise<Types.Job> {
@ -816,27 +815,41 @@ class ApiClient {
return result;
}
/****
* Document CRUD API
/**
uploadCandidateDocument
usage:
const controller : StreamingResponse<Types.Document> = uploadCandidateDocument(...);
const document : Types.Document = await controller.promise;
console.log(`Document id: ${document.id}`)
*/
async uploadCandidateDocument(file: File, includeInRag: boolean = true): Promise<Types.Document> {
uploadCandidateDocument(file: File, options: Types.DocumentOptions, streamingOptions?: StreamingOptions<Types.Document>): StreamingResponse<Types.Document> {
const convertedOptions = toSnakeCase(options);
const formData = new FormData()
formData.append('file', file);
formData.append('filename', file.name);
formData.append('include_in_rag', includeInRag.toString());
const response = await fetch(`${this.baseUrl}/candidates/documents/upload`, {
method: 'POST',
formData.append('options', JSON.stringify(convertedOptions));
streamingOptions = {
...streamingOptions,
headers: {
// Don't set Content-Type - browser will set it automatically with boundary
'Authorization': this.defaultHeaders['Authorization']
},
body: formData
});
}
};
return this.streamify<Types.Document>('/candidates/documents/upload', formData, streamingOptions);
// {
// method: 'POST',
// headers: {
// // Don't set Content-Type - browser will set it automatically with boundary
// 'Authorization': this.defaultHeaders['Authorization']
// },
// body: formData
// });
const result = await handleApiResponse<Types.Document>(response);
// const result = await handleApiResponse<Types.Document>(response);
return result;
// return result;
}
async candidateMatchForRequirement(candidate_id: string, requirement: string) : Promise<Types.SkillMatch> {
@ -854,7 +867,7 @@ class ApiClient {
async updateCandidateDocument(document: Types.Document) : Promise<Types.Document> {
const request : Types.DocumentUpdateRequest = {
filename: document.filename,
includeInRAG: document.includeInRAG
options: document.options
}
const response = await fetch(`${this.baseUrl}/candidates/documents/${document.id}`, {
method: 'PATCH',
@ -976,28 +989,36 @@ class ApiClient {
}
/**
* Send message with streaming response support and date conversion
* streamify<T = Types.ChatMessage[]>
* @param api API entrypoint
* @param data Data to be attached to request Body
* @param options callbacks, headers, and method
* @returns
*/
sendMessageStream(
chatMessage: Types.ChatMessageUser,
options: StreamingOptions = {}
): StreamingResponse {
streamify<T = Types.ChatMessage[]>(api: string, data: BodyInit, options: StreamingOptions<T> = {}) : StreamingResponse<T> {
const abortController = new AbortController();
const signal = options.signal || abortController.signal;
let messageId = '';
const headers = options.headers || null;
const method = options.method || 'POST';
const promise = new Promise<Types.ChatMessage[]>(async (resolve, reject) => {
let messageId = '';
let finalMessage : T | null = null;
console.log('streamify: ', {
api,
method,
headers,
body: data
});
const promise = new Promise<T>(async (resolve, reject) => {
try {
const request = formatApiRequest(chatMessage);
const response = await fetch(`${this.baseUrl}/chat/sessions/${chatMessage.sessionId}/messages/stream`, {
method: 'POST',
headers: {
const response = await fetch(`${this.baseUrl}${api}`, {
method,
headers: headers || {
...this.defaultHeaders,
'Accept': 'text/event-stream',
'Cache-Control': 'no-cache'
'Cache-Control': 'no-cache',
},
body: JSON.stringify(request),
body: data,
signal
});
@ -1013,13 +1034,12 @@ class ApiClient {
const decoder = new TextDecoder();
let buffer = '';
let streamingMessage: Types.ChatMessageStreaming | null = null;
const incomingMessageList: Types.ChatMessage[] = [];
try {
while (true) {
const { done, value } = await reader.read();
if (done) {
// Stream ended naturally - create final message
// Stream ended naturally
break;
}
@ -1037,12 +1057,9 @@ class ApiClient {
const data = line.slice(5).trim();
const incoming: any = JSON.parse(data);
console.log(incoming.status, incoming);
// Handle different status types
switch (incoming.status) {
case 'streaming':
console.log(incoming.status, incoming);
const streaming = Types.convertChatMessageStreamingFromApi(incoming);
if (streamingMessage === null) {
streamingMessage = {...streaming};
@ -1066,8 +1083,8 @@ class ApiClient {
break;
case 'done':
const message = Types.convertChatMessageFromApi(incoming);
incomingMessageList.push(message);
const message = Types.convertApiMessageFromApi(incoming) as T;
finalMessage = message as any;
try {
options.onMessage?.(message);
} catch (error) {
@ -1090,13 +1107,14 @@ class ApiClient {
}
options.onComplete?.();
resolve(incomingMessageList);
resolve(finalMessage as T);
} catch (error) {
if (signal.aborted) {
options.onComplete?.();
reject(new Error('Request was aborted'));
} else {
options.onError?.((error as Error).message);
console.error(error);
options.onError?.({ sessionId: '', status: 'error', type: 'text', content: (error as Error).message});
options.onComplete?.();
reject(error);
}
@ -1109,6 +1127,17 @@ class ApiClient {
promise
};
}
/**
* Send message with streaming response support and date conversion
*/
sendMessageStream(
chatMessage: Types.ChatMessageUser,
options: StreamingOptions = {}
): StreamingResponse {
const body = JSON.stringify(formatApiRequest(chatMessage));
return this.streamify(`/chat/sessions/${chatMessage.sessionId}/messages/stream`, body, options)
}
/**
* Get persisted chat messages for a session with date conversion

View File

@ -1,6 +1,6 @@
// Generated TypeScript types from Pydantic models
// Source: src/backend/models.py
// Generated on: 2025-06-05T00:24:02.132276
// Generated on: 2025-06-05T20:17:00.575243
// DO NOT EDIT MANUALLY - This file is auto-generated
// ============================
@ -11,7 +11,7 @@ export type AIModelType = "qwen2.5" | "flux-schnell";
export type ActivityType = "login" | "search" | "view_job" | "apply_job" | "message" | "update_profile" | "chat";
export type ApiActivityType = "system" | "info" | "searching" | "thinking" | "generating" | "generating_image" | "tooling" | "heartbeat";
export type ApiActivityType = "system" | "info" | "searching" | "thinking" | "generating" | "converting" | "generating_image" | "tooling" | "heartbeat";
export type ApiMessageType = "binary" | "text" | "json";
@ -351,7 +351,7 @@ export interface ChatMessageStatus {
status: "streaming" | "status" | "done" | "error";
type: "binary" | "text" | "json";
timestamp?: Date;
activity: "system" | "info" | "searching" | "thinking" | "generating" | "generating_image" | "tooling" | "heartbeat";
activity: "system" | "info" | "searching" | "thinking" | "generating" | "converting" | "generating_image" | "tooling" | "heartbeat";
content: any;
}
@ -477,7 +477,7 @@ export interface Document {
type: "pdf" | "docx" | "txt" | "markdown" | "image";
size: number;
uploadDate?: Date;
includeInRAG: boolean;
options?: DocumentOptions;
ragChunks?: number;
}
@ -494,9 +494,24 @@ export interface DocumentListResponse {
total: number;
}
export interface DocumentMessage {
id?: string;
sessionId: string;
senderId?: string;
status: "streaming" | "status" | "done" | "error";
type: "binary" | "text" | "json";
timestamp?: Date;
document: Document;
}
export interface DocumentOptions {
includeInRAG?: boolean;
isJobDocument?: boolean;
}
export interface DocumentUpdateRequest {
filename?: string;
includeInRAG?: boolean;
options?: DocumentOptions;
}
export interface EditHistory {
@ -1227,6 +1242,19 @@ export function convertDocumentFromApi(data: any): Document {
uploadDate: data.uploadDate ? new Date(data.uploadDate) : undefined,
};
}
/**
* Convert DocumentMessage from API response, parsing date fields
* Date fields: timestamp
*/
export function convertDocumentMessageFromApi(data: any): DocumentMessage {
if (!data) return data;
return {
...data,
// Convert timestamp from ISO string to Date
timestamp: data.timestamp ? new Date(data.timestamp) : undefined,
};
}
/**
* Convert EditHistory from API response, parsing date fields
* Date fields: editedAt
@ -1478,6 +1506,8 @@ export function convertFromApi<T>(data: any, modelType: string): T {
return convertDataSourceConfigurationFromApi(data) as T;
case 'Document':
return convertDocumentFromApi(data) as T;
case 'DocumentMessage':
return convertDocumentMessageFromApi(data) as T;
case 'EditHistory':
return convertEditHistoryFromApi(data) as T;
case 'Education':

View File

@ -1,5 +1,4 @@
from __future__ import annotations
import traceback
from pydantic import BaseModel, Field, model_validator # type: ignore
from typing import (
Literal,
@ -30,6 +29,7 @@ import defines
from .registry import agent_registry
from metrics import Metrics
import model_cast
import backstory_traceback as traceback
from rag import ( ChromaDBGetResponse )

View File

@ -16,7 +16,6 @@ import inspect
import random
import re
import json
import traceback
import asyncio
import time
import asyncio
@ -29,6 +28,7 @@ from models import ActivityType, ApiActivityType, Candidate, ChatMessage, ChatMe
import model_cast
from logger import logger
import defines
import backstory_traceback as traceback
from image_generator.image_model_cache import ImageModelCache
from image_generator.profile_image import generate_image, ImageRequest

View File

@ -17,7 +17,6 @@ import inspect
import random
import re
import json
import traceback
import asyncio
import time
import asyncio
@ -31,6 +30,7 @@ from models import ApiActivityType, Candidate, ChatMessage, ChatMessageError, Ch
import model_cast
from logger import logger
import defines
import backstory_traceback as traceback
seed = int(time.time())
random.seed(seed)

View File

@ -13,17 +13,17 @@ from typing import (
import inspect
import re
import json
import traceback
import asyncio
import time
import asyncio
import numpy as np # type: ignore
from .base import Agent, agent_registry, LLMMessage
from models import Candidate, ChatMessage, ChatMessageError, ChatMessageMetaData, ApiMessageType, ChatMessageStatus, ChatMessageUser, ChatOptions, ChatSenderType, ApiStatusType, JobRequirements, JobRequirementsMessage, Tunables
from models import ApiActivityType, Candidate, ChatMessage, ChatMessageError, ChatMessageMetaData, ApiMessageType, ChatMessageStatus, ChatMessageUser, ChatOptions, ChatSenderType, ApiStatusType, JobRequirements, JobRequirementsMessage, Tunables
import model_cast
from logger import logger
import defines
import backstory_traceback as traceback
class JobRequirementsAgent(Agent):
agent_type: Literal["job_requirements"] = "job_requirements" # type: ignore
@ -110,7 +110,9 @@ class JobRequirementsAgent(Agent):
# Stage 1A: Analyze job requirements
status_message = ChatMessageStatus(
session_id=session_id,
content = f"Analyzing job requirements")
content = f"Analyzing job requirements",
activity=ApiActivityType.THINKING
)
yield status_message
generated_message = None
@ -122,20 +124,21 @@ class JobRequirementsAgent(Agent):
yield generated_message
if not generated_message:
status_message = ChatMessageStatus(
error_message = ChatMessageError(
session_id=session_id,
content="Job requirements analysis failed to generate a response.")
logger.error(f"⚠️ {status_message.content}")
yield status_message
content="Job requirements analysis failed to generate a response."
)
logger.error(f"⚠️ {error_message.content}")
yield error_message
return
json_str = self.extract_json_from_text(generated_message.content)
job_requirements : JobRequirements | None = None
job_requirements_data = ""
company_name = ""
job_summary = ""
job_title = ""
try:
json_str = self.extract_json_from_text(generated_message.content)
job_requirements_data = json.loads(json_str)
job_requirements_data = job_requirements_data.get("job_requirements", None)
job_title = job_requirements_data.get("job_title", "")
@ -169,7 +172,8 @@ class JobRequirementsAgent(Agent):
requirements=job_requirements,
company=company_name,
title=job_title,
summary=job_summary
summary=job_summary,
description=prompt,
)
yield job_requirements_message

View File

@ -13,7 +13,6 @@ from typing import (
import inspect
import re
import json
import traceback
import asyncio
import time
import asyncio

View File

@ -0,0 +1,55 @@
import traceback
import os
import sys
import defines
def filter_traceback(tb, app_path=None, module_name=None):
    """
    Reduce a traceback to frames belonging to the application.

    Args:
        tb: Traceback object (e.g. from sys.exc_info()[2]).
        app_path: Application directory; frames whose real path lives under it
            are kept (real paths are compared to be robust to symlinks).
        module_name: Prefix matched against frame filenames; matching frames
            are also kept.

    Returns:
        A formatted traceback string with only the kept frames, followed by
        the exception type/message taken from the *current* exception context
        (sys.exc_info()) — so this should be called while the exception is
        being handled.
    """
    frames = traceback.extract_tb(tb)

    def keep(frame):
        # Keep frames under app_path, or whose filename begins with module_name.
        if app_path and os.path.realpath(frame.filename).startswith(os.path.realpath(app_path)):
            return True
        return bool(module_name) and frame.filename.startswith(module_name)

    formatted_frames = traceback.format_list([frame for frame in frames if keep(frame)])

    # Append the trailing "ExcType: message" line(s) from the active exception.
    exc_type, exc_value, _ = sys.exc_info()
    tail = traceback.format_exception_only(exc_type, exc_value)
    return ''.join(formatted_frames + tail)
def format_exc(app_path=defines.app_path, module_name=None):
    """
    Drop-in replacement for traceback.format_exc() that reports only
    application frames (see filter_traceback).

    Args:
        app_path: Application directory used for filtering. Defaults to
            defines.app_path, resolved once at import time.
        module_name: Optional module/filename prefix to also keep.

    Returns:
        The filtered traceback string, or "" when no exception is active.
    """
    _, _, exc_tb = sys.exc_info()
    if exc_tb is None:
        # Not currently handling an exception — nothing to format.
        return ""
    return filter_traceback(exc_tb, app_path=app_path, module_name=module_name)

View File

@ -61,3 +61,6 @@ host = os.getenv("BACKSTORY_HOST", "0.0.0.0")
port = int(os.getenv("BACKSTORY_PORT", "8911"))
api_prefix = "/api/1.0"
debug=os.getenv("BACKSTORY_DEBUG", "false").lower() in ("true", "1", "yes")
# Used for filtering tracebacks
app_path="/opt/backstory/src/backend"

View File

@ -25,7 +25,6 @@ import re
import asyncio
import signal
import json
import traceback
import uuid
import logging
@ -38,6 +37,7 @@ from prometheus_fastapi_instrumentator import Instrumentator # type: ignore
from prometheus_client import CollectorRegistry, Counter # type: ignore
import secrets
import os
import backstory_traceback
# =============================
# Import custom modules
@ -64,7 +64,7 @@ import agents
# =============================
from models import (
# API
ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, Job, LoginRequest, CreateCandidateRequest, CreateEmployerRequest,
MOCK_UUID, ApiActivityType, ChatMessageError, ChatMessageStatus, ChatMessageStreaming, ChatMessageUser, DocumentMessage, DocumentOptions, Job, JobRequirementsMessage, LoginRequest, CreateCandidateRequest, CreateEmployerRequest,
# User models
Candidate, Employer, BaseUserWithType, BaseUser, Guest, Authentication, AuthResponse, CandidateAI,
@ -172,7 +172,9 @@ ALGORITHM = "HS256"
# ============================
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request: Request, exc: RequestValidationError):
import traceback
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Validation error {request.method} {request.url.path}: {str(exc)}")
return JSONResponse(
status_code=HTTP_422_UNPROCESSABLE_ENTITY,
@ -640,7 +642,7 @@ async def refresh_token_endpoint(
expiresAt=int((datetime.now(UTC) + timedelta(hours=24)).timestamp())
)
return create_success_response(auth_response.model_dump(by_alias=True, exclude_unset=True))
return create_success_response(auth_response.model_dump(by_alias=True))
except jwt.PyJWTError:
return JSONResponse(
@ -719,7 +721,7 @@ async def create_candidate_ai(
candidate = CandidateAI.model_validate(candidate_data)
except ValidationError as e:
logger.error(f"❌ AI candidate data validation failed")
for lines in traceback.format_exc().splitlines():
for lines in backstory_traceback.format_exc().splitlines():
logger.error(lines)
logger.error(json.dumps(persona_message.content, indent=2))
for error in e.errors():
@ -730,7 +732,7 @@ async def create_candidate_ai(
)
except Exception as e:
# Log the error and return a validation error response
for lines in traceback.format_exc().splitlines():
for lines in backstory_traceback.format_exc().splitlines():
logger.error(lines)
logger.error(json.dumps(persona_message.content, indent=2))
return JSONResponse(
@ -802,7 +804,7 @@ async def create_candidate_ai(
})
except Exception as e:
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ AI Candidate creation error: {e}")
return JSONResponse(
status_code=500,
@ -1432,7 +1434,7 @@ async def login(
code_sent=mfa_code
)
)
return create_success_response(mfa_response.model_dump(by_alias=True, exclude_unset=True))
return create_success_response(mfa_response.model_dump(by_alias=True))
# Trusted device - proceed with normal login
await device_manager.update_device_last_used(user_data["id"], device_id)
@ -1484,10 +1486,10 @@ async def login(
logger.info(f"🔑 User {request.login} logged in successfully from trusted device")
return create_success_response(auth_response.model_dump(by_alias=True, exclude_unset=True))
return create_success_response(auth_response.model_dump(by_alias=True))
except Exception as e:
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Login error: {e}")
return JSONResponse(
status_code=500,
@ -1638,59 +1640,136 @@ async def verify_mfa(
logger.info(f"✅ MFA verified and login completed for {request.email}")
return create_success_response(auth_response.model_dump(by_alias=True, exclude_unset=True))
return create_success_response(auth_response.model_dump(by_alias=True))
except Exception as e:
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ MFA verification error: {e}")
return JSONResponse(
status_code=500,
content=create_error_response("MFA_VERIFICATION_FAILED", "Failed to verify MFA")
)
class DebugStreamingResponse(StreamingResponse):
    """
    Diagnostic drop-in for StreamingResponse that logs every chunk it streams
    and fails loudly — with serialization guidance — when the body iterator
    yields something other than str/bytes (e.g. a Pydantic model that was
    never converted to JSON).
    """

    async def stream_response(self, send):
        """
        Stream the response body over ASGI while logging each chunk.

        Args:
            send: ASGI send callable used to emit http.response.body events.

        Raises:
            AttributeError: when a chunk is neither str nor bytes; the log
                output above the raise explains how the chunk should have
                been serialized.
        """
        logger.debug("=== DEBUG STREAMING RESPONSE ===")
        logger.debug(f"Body iterator: {self.body_iterator}")
        logger.debug(f"Media type: {self.media_type}")
        logger.debug(f"Charset: {self.charset}")
        chunk_count = 0
        async for chunk in self.body_iterator:
            chunk_count += 1
            # repr is truncated to 200 chars to keep log lines bounded.
            logger.debug(f"Chunk {chunk_count}: type={type(chunk)}, repr={repr(chunk)[:200]}")
            if not isinstance(chunk, (str, bytes)):
                logger.error(f"PROBLEM FOUND! Chunk {chunk_count} is type {type(chunk)}, not str/bytes")
                logger.error(f"Chunk content: {chunk}")
                if hasattr(chunk, '__dict__'):
                    logger.error(f"Chunk attributes: {chunk.__dict__}")
                # Try to help with conversion
                if hasattr(chunk, 'model_dump_json'):
                    logger.error("Chunk appears to be a Pydantic model - should call .model_dump_json()")
                elif hasattr(chunk, 'json'):
                    logger.error("Chunk appears to be a Pydantic model - should call .json()")
                # Mirror the error the default encode() path would raise, but
                # only after the diagnostics above have been logged.
                raise AttributeError(f"'{type(chunk).__name__}' object has no attribute 'encode'")
            if isinstance(chunk, str):
                chunk = chunk.encode(self.charset)
            await send({
                "type": "http.response.body",
                "body": chunk,
                "more_body": True,
            })
        # Terminate the ASGI response with the empty final body event.
        await send({"type": "http.response.body", "body": b"", "more_body": False})
@api_router.post("/candidates/documents/upload")
async def upload_candidate_document(
file: UploadFile = File(...),
include_in_rag: bool = Form(True),
options: str = Form(...),
current_user = Depends(get_current_user),
database: RedisDatabase = Depends(get_database)
):
"""Upload a document for the current candidate"""
try:
# Parse the JSON string and create DocumentOptions object
options_dict = json.loads(options)
options = DocumentOptions(**options_dict)
except (json.JSONDecodeError, ValidationError) as e:
return StreamingResponse(
iter([ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads
content="Invalid options format. Please provide valid JSON."
)]),
media_type="text/event-stream"
)
# Check file size (limit to 10MB)
max_size = 10 * 1024 * 1024 # 10MB
file_content = await file.read()
if len(file_content) > max_size:
logger.info(f"⚠️ File too large: {file.filename} ({len(file_content)} bytes)")
return StreamingResponse(
iter([ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads
content="File size exceeds 10MB limit"
)]),
media_type="text/event-stream"
)
if len(file_content) == 0:
logger.info(f"⚠️ File is empty: {file.filename}")
return StreamingResponse(
iter([ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads
content="File is empty"
)]),
media_type="text/event-stream"
)
"""Upload a document for the current candidate"""
async def upload_stream_generator():
# Verify user is a candidate
if current_user.user_type != "candidate":
logger.warning(f"⚠️ Unauthorized upload attempt by user type: {current_user.user_type}")
return JSONResponse(
status_code=403,
content=create_error_response("FORBIDDEN", "Only candidates can upload documents")
error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads
content="Only candidates can upload documents"
)
yield error_message
return
candidate: Candidate = current_user
file.filename = re.sub(r'^.*/', '', file.filename) if file.filename else '' # Sanitize filename
if not file.filename or file.filename.strip() == "":
logger.warning("⚠️ File upload attempt with missing filename")
return JSONResponse(
status_code=400,
content=create_error_response("MISSING_FILENAME", "File must have a valid filename")
error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads
content="File must have a valid filename"
)
yield error_message
return
logger.info(f"📁 Received file upload: filename='{file.filename}', content_type='{file.content_type}', size estimate='{file.size if hasattr(file, 'size') else 'unknown'}'")
logger.info(f"📁 Received file upload: filename='{file.filename}', content_type='{file.content_type}', size='{len(file_content)} bytes'")
directory = "rag-content" if options.include_in_RAG else "files"
directory = "jobs" if options.is_job_document else directory
# Ensure the file does not already exist either in 'files' or in 'rag-content'
file_path = os.path.join(defines.user_dir, candidate.username, "rag-content", file.filename)
dir_path = os.path.join(defines.user_dir, candidate.username, directory)
if not os.path.exists(dir_path):
os.makedirs(dir_path, exist_ok=True)
file_path = os.path.join(dir_path, file.filename)
if os.path.exists(file_path):
logger.warning(f"⚠️ File already exists: {file_path}")
return JSONResponse(
status_code=400,
content=create_error_response("FILE_EXISTS", "File with this name already exists")
)
file_path = os.path.join(defines.user_dir, candidate.username, "files", file.filename)
if os.path.exists(file_path):
logger.warning(f"⚠️ File already exists: {file_path}")
return JSONResponse(
status_code=400,
content=create_error_response("FILE_EXISTS", "File with this name already exists")
error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads
content=f"File with this name already exists in the '{directory}' directory"
)
yield error_message
return
# Validate file type
allowed_types = ['.txt', '.md', '.docx', '.pdf', '.png', '.jpg', '.jpeg', '.gif']
@ -1698,24 +1777,13 @@ async def upload_candidate_document(
if file_extension not in allowed_types:
logger.warning(f"⚠️ Invalid file type: {file_extension} for file {file.filename}")
return JSONResponse(
status_code=400,
content=create_error_response(
"INVALID_FILE_TYPE",
f"File type {file_extension} not supported. Allowed types: {', '.join(allowed_types)}"
)
error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads
content=f"File type {file_extension} not supported. Allowed types: {', '.join(allowed_types)}"
)
# Check file size (limit to 10MB)
max_size = 10 * 1024 * 1024 # 10MB
file_content = await file.read()
if len(file_content) > max_size:
logger.info(f"⚠️ File too large: {file.filename} ({len(file_content)} bytes)")
return JSONResponse(
status_code=400,
content=create_error_response("FILE_TOO_LARGE", "File size exceeds 10MB limit")
)
yield error_message
return
# Create document metadata
document_id = str(uuid.uuid4())
document_type = get_document_type_from_filename(file.filename or "unknown.txt")
@ -1727,12 +1795,13 @@ async def upload_candidate_document(
type=document_type,
size=len(file_content),
upload_date=datetime.now(UTC),
include_in_RAG=include_in_rag,
options=options,
owner_id=candidate.id
)
# Save file to disk
file_path = os.path.join(defines.user_dir, candidate.username, "rag-content" if include_in_rag else "files", file.filename)
directory = os.path.join(defines.user_dir, candidate.username, directory)
file_path = os.path.join(directory, file.filename)
try:
with open(file_path, "wb") as f:
@ -1742,10 +1811,12 @@ async def upload_candidate_document(
except Exception as e:
logger.error(f"❌ Failed to save file to disk: {e}")
return JSONResponse(
status_code=500,
content=create_error_response("FILE_SAVE_ERROR", "Failed to save file to disk")
error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads
content="Failed to save file to disk",
)
yield error_message
return
if document_type != DocumentType.MARKDOWN and document_type != DocumentType.TXT:
p = pathlib.Path(file_path)
@ -1755,24 +1826,105 @@ async def upload_candidate_document(
if (not p_as_md.exists()) or (
p.stat().st_mtime > p_as_md.stat().st_mtime
):
status_message = ChatMessageStatus(
session_id=MOCK_UUID, # No session ID for document uploads
content=f"Converting {file.filename} to Markdown format for better processing...",
activity=ApiActivityType.CONVERTING
)
yield status_message
try:
from markitdown import MarkItDown # type: ignore
from markitdown import MarkItDown# type: ignore
md = MarkItDown(enable_plugins=False) # Set to True to enable plugins
result = md.convert(file_path)
result = md.convert(file_path, output_format="markdown")
p_as_md.write_text(result.text_content)
file_path = p_as_md
except Exception as e:
logging.error(f"Error convering via markdownit: {e}")
error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads
content=f"Failed to convert {file.filename} to Markdown.",
)
yield error_message
logger.error(f"❌ Error converting {file_path} to Markdown: {e}")
return
# Store document metadata in database
await database.set_document(document_id, document_data.model_dump())
await database.add_document_to_candidate(candidate.id, document_id)
logger.info(f"📄 Document uploaded: {file.filename} for candidate {candidate.username}")
return create_success_response(document_data.model_dump(by_alias=True, exclude_unset=True))
chat_message = DocumentMessage(
session_id=MOCK_UUID, # No session ID for document uploads
type=ApiMessageType.JSON,
status=ApiStatusType.DONE,
document=document_data,
)
yield chat_message
# If this is a job description, process it with the job requirements agent
if options.is_job_document:
content = None
with open(file_path, "r") as f:
content = f.read()
if not content or len(content) == 0:
error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads
content="Job description file is empty"
)
yield error_message
return
async with entities.get_candidate_entity(candidate=candidate) as candidate_entity:
chat_agent = candidate_entity.get_or_create_agent(agent_type=ChatContextType.JOB_REQUIREMENTS)
if not chat_agent:
error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads
content="No agent found for job requirements chat type"
)
yield error_message
return
message = None
async for message in chat_agent.generate(
llm=llm_manager.get_llm(),
model=defines.model,
session_id=MOCK_UUID,
prompt=content
):
if message.status != ApiStatusType.DONE:
yield message
if not message or not isinstance(message, JobRequirementsMessage):
error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads
content="Failed to process job description file"
)
yield error_message
return
yield message
try:
async def to_json(method):
try:
async for message in method:
json_data = message.model_dump(mode='json', by_alias=True)
json_str = json.dumps(json_data)
yield f"data: {json_str}\n\n".encode("utf-8")
except Exception as e:
logger.error(backstory_traceback.format_exc())
logger.error(f"Error in to_json conversion: {e}")
return
# return DebugStreamingResponse(
return StreamingResponse(
to_json(upload_stream_generator()),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache, no-store, must-revalidate",
"Connection": "keep-alive",
"X-Accel-Buffering": "no", # Nginx
"X-Content-Type-Options": "nosniff",
"Access-Control-Allow-Origin": "*", # Adjust for your CORS needs
"Transfer-Encoding": "chunked",
},
)
except Exception as e:
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Document upload error: {e}")
return JSONResponse(
status_code=500,
@ -1850,7 +2002,7 @@ async def upload_candidate_profile(
return create_success_response(True)
except Exception as e:
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Document upload error: {e}")
return JSONResponse(
status_code=500,
@ -1905,7 +2057,7 @@ async def get_candidate_profile_image(
filename=candidate.profile_image
)
except Exception as e:
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Get candidate profile image failed: {str(e)}")
return JSONResponse(
status_code=500,
@ -1941,10 +2093,10 @@ async def get_candidate_documents(
total=len(documents)
)
return create_success_response(response_data.model_dump(by_alias=True, exclude_unset=True))
return create_success_response(response_data.model_dump(by_alias=True))
except Exception as e:
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Get candidate documents error: {e}")
return JSONResponse(
status_code=500,
@ -2008,7 +2160,7 @@ async def get_document_content(
content=content,
size=document.size
)
return create_success_response(response.model_dump(by_alias=True, exclude_unset=True));
return create_success_response(response.model_dump(by_alias=True));
except Exception as e:
logger.error(f"❌ Failed to read document file: {e}")
@ -2018,7 +2170,7 @@ async def get_document_content(
)
except Exception as e:
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Get document content error: {e}")
return JSONResponse(
status_code=500,
@ -2121,7 +2273,7 @@ async def update_document(
logger.info(f"📄 Document updated: {document_id} for candidate {candidate.username}")
return create_success_response(updated_document.model_dump(by_alias=True, exclude_unset=True))
return create_success_response(updated_document.model_dump(by_alias=True))
except Exception as e:
logger.error(f"❌ Update document error: {e}")
@ -2168,7 +2320,7 @@ async def delete_document(
)
# Delete file from disk
file_path = os.path.join(defines.user_dir, candidate.username, "rag-content" if document.include_in_RAG else "files", document.originalName)
file_path = os.path.join(defines.user_dir, candidate.username, "rag-content" if document.options.include_in_RAG else "files", document.originalName)
file_path = pathlib.Path(file_path)
try:
@ -2201,7 +2353,7 @@ async def delete_document(
})
except Exception as e:
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Delete document error: {e}")
return JSONResponse(
status_code=500,
@ -2237,7 +2389,7 @@ async def search_candidate_documents(
total=len(documents)
)
return create_success_response(response_data.model_dump(by_alias=True, exclude_unset=True))
return create_success_response(response_data.model_dump(by_alias=True))
except Exception as e:
logger.error(f"❌ Search documents error: {e}")
@ -2279,7 +2431,7 @@ async def post_candidate_vector_content(
content = candidate_entity.file_watcher.prepare_metadata(metadata)
rag_response = RagContentResponse(id=id, content=content, metadata=metadata)
logger.info(f"✅ Fetched RAG content for document id {id} for candidate {candidate.username}")
return create_success_response(rag_response.model_dump(by_alias=True, exclude_unset=True))
return create_success_response(rag_response.model_dump(by_alias=True))
return JSONResponse(f"Document id {rag_document.id} not found.", 404)
except Exception as e:
@ -2435,7 +2587,7 @@ async def update_candidate(
updated_candidate = CandidateAI.model_validate(candidate_dict) if is_AI else Candidate.model_validate(candidate_dict)
await database.set_candidate(candidate_id, updated_candidate.model_dump())
return create_success_response(updated_candidate.model_dump(by_alias=True, exclude_unset=True))
return create_success_response(updated_candidate.model_dump(by_alias=True))
except Exception as e:
logger.error(f"❌ Update candidate error: {e}")
@ -2469,7 +2621,7 @@ async def get_candidates(
)
paginated_response = create_paginated_response(
[c.model_dump(by_alias=True, exclude_unset=True) for c in paginated_candidates],
[c.model_dump(by_alias=True) for c in paginated_candidates],
page, limit, total
)
@ -2518,7 +2670,7 @@ async def search_candidates(
)
paginated_response = create_paginated_response(
[c.model_dump(by_alias=True, exclude_unset=True) for c in paginated_candidates],
[c.model_dump(by_alias=True) for c in paginated_candidates],
page, limit, total
)
@ -2633,7 +2785,7 @@ async def create_candidate_job(
await database.set_job(job.id, job.model_dump())
return create_success_response(job.model_dump(by_alias=True, exclude_unset=True))
return create_success_response(job.model_dump(by_alias=True))
except Exception as e:
logger.error(f"❌ Job creation error: {e}")
@ -2661,7 +2813,7 @@ async def get_job(
await database.set_job(job_id, job_data)
job = Job.model_validate(job_data)
return create_success_response(job.model_dump(by_alias=True, exclude_unset=True))
return create_success_response(job.model_dump(by_alias=True))
except Exception as e:
logger.error(f"❌ Get job error: {e}")
@ -2699,7 +2851,7 @@ async def get_jobs(
)
paginated_response = create_paginated_response(
[j.model_dump(by_alias=True, exclude_unset=True) for j in paginated_jobs],
[j.model_dump(by_alias=True) for j in paginated_jobs],
page, limit, total
)
@ -2744,7 +2896,7 @@ async def search_jobs(
)
paginated_response = create_paginated_response(
[j.model_dump(by_alias=True, exclude_unset=True) for j in paginated_jobs],
[j.model_dump(by_alias=True) for j in paginated_jobs],
page, limit, total
)
@ -2803,7 +2955,7 @@ async def post_candidate_rag_search(
content=create_error_response("AGENT_NOT_FOUND", "No agent found for this chat type")
)
user_message = ChatMessageUser(sender_id=candidate.id, session_id="", content=query, timestamp=datetime.now(UTC))
user_message = ChatMessageUser(sender_id=candidate.id, session_id=MOCK_UUID, content=query, timestamp=datetime.now(UTC))
rag_message = None
async for generated_message in chat_agent.generate(
llm=llm_manager.get_llm(),
@ -2818,7 +2970,7 @@ async def post_candidate_rag_search(
status_code=500,
content=create_error_response("NO_RESPONSE", "No response generated for the RAG search")
)
return create_success_response(rag_message.metadata.rag_results[0].model_dump(by_alias=True, exclude_unset=True))
return create_success_response(rag_message.metadata.rag_results[0].model_dump(by_alias=True))
except Exception as e:
logger.error(f"❌ Get candidate chat summary error: {e}")
@ -2863,7 +3015,7 @@ async def get_candidate(
candidate = Candidate.model_validate(candidate_data) if not candidate_data.get("is_AI") else CandidateAI.model_validate(candidate_data)
return create_success_response(candidate.model_dump(by_alias=True, exclude_unset=True))
return create_success_response(candidate.model_dump(by_alias=True))
except Exception as e:
logger.error(f"❌ Get candidate error: {e}")
@ -3020,10 +3172,10 @@ async def create_chat_session(
logger.info(f"✅ Chat session created: {chat_session.id} for user {current_user.id}" +
(f" about candidate {candidate_data.full_name}" if candidate_data else ""))
return create_success_response(chat_session.model_dump(by_alias=True, exclude_unset=True))
return create_success_response(chat_session.model_dump(by_alias=True))
except Exception as e:
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Chat session creation error: {e}")
logger.info(json.dumps(session_data, indent=2))
return JSONResponse(
@ -3096,7 +3248,7 @@ async def post_chat_session_message_stream(
)
except Exception as e:
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Chat message streaming error")
return JSONResponse(
status_code=500,
@ -3143,7 +3295,7 @@ async def get_chat_session_messages(
paginated_messages = messages_list[start:end]
paginated_response = create_paginated_response(
[m.model_dump(by_alias=True, exclude_unset=True) for m in paginated_messages],
[m.model_dump(by_alias=True) for m in paginated_messages],
page, limit, total
)
@ -3239,7 +3391,7 @@ async def update_chat_session(
logger.info(f"✅ Chat session {session_id} updated by user {current_user.id}")
return create_success_response(updated_session.model_dump(by_alias=True, exclude_unset=True))
return create_success_response(updated_session.model_dump(by_alias=True))
except ValueError as ve:
logger.warning(f"⚠️ Validation error updating chat session: {ve}")
@ -3392,7 +3544,7 @@ async def get_candidate_skill_match(
agent.generate(
llm=llm_manager.get_llm(),
model=defines.model,
session_id="",
session_id=MOCK_UUID,
prompt=requirement,
),
)
@ -3410,7 +3562,7 @@ async def get_candidate_skill_match(
})
except Exception as e:
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Get candidate skill match error: {e}")
return JSONResponse(
status_code=500,
@ -3458,7 +3610,7 @@ async def get_candidate_chat_sessions(
context.related_entity_id == candidate.id):
sessions_list.append(session)
except Exception as e:
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Failed to validate session ({index}): {e}")
logger.error(f"❌ Session data: {session_data}")
continue
@ -3473,7 +3625,7 @@ async def get_candidate_chat_sessions(
paginated_sessions = sessions_list[start:end]
paginated_response = create_paginated_response(
[s.model_dump(by_alias=True, exclude_unset=True) for s in paginated_sessions],
[s.model_dump(by_alias=True) for s in paginated_sessions],
page, limit, total
)
@ -3711,7 +3863,9 @@ async def log_requests(request: Request, call_next):
logger.warning(f"⚠️ Response {request.method} {response.status_code}: Path: {request.url.path}")
return response
except Exception as e:
import traceback
logger.error(traceback.format_exc())
logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Error processing request: {str(e)}, Path: {request.url.path}, Method: {request.method}")
return JSONResponse(status_code=400, content={"detail": "Invalid HTTP request"})

View File

@ -518,6 +518,13 @@ class DocumentType(str, Enum):
MARKDOWN = "markdown"
IMAGE = "image"
class DocumentOptions(BaseModel):
    """Per-document upload options supplied by the client as JSON."""
    # When True, the file is stored under 'rag-content' and indexed for RAG.
    include_in_RAG: Optional[bool] = Field(True, alias="includeInRAG")
    # When True, the file is stored under 'jobs' and routed through the
    # job-requirements processing pipeline after upload.
    is_job_document: Optional[bool] = Field(False, alias="isJobDocument")
    model_config = {
        "populate_by_name": True # Allow both field names and aliases
    }
class Document(BaseModel):
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
owner_id: str = Field(..., alias="ownerId")
@ -526,7 +533,7 @@ class Document(BaseModel):
type: DocumentType
size: int
upload_date: datetime = Field(default_factory=lambda: datetime.now(UTC), alias="uploadDate")
include_in_RAG: bool = Field(default=True, alias="includeInRAG")
options: DocumentOptions = Field(default_factory=DocumentOptions, alias="options")
rag_chunks: Optional[int] = Field(default=0, alias="ragChunks")
model_config = {
"populate_by_name": True # Allow both field names and aliases
@ -545,10 +552,13 @@ class DocumentContentResponse(BaseModel):
class DocumentListResponse(BaseModel):
    """Response payload listing a candidate's documents."""
    documents: List[Document]
    # Count of documents in this response.
    total: int
    model_config = {
        "populate_by_name": True # Allow both field names and aliases
    }
class DocumentUpdateRequest(BaseModel):
    """Partial update payload for a document.

    NOTE(review): fields left as None are presumably ignored by the update
    endpoint — confirm against the handler.
    """
    filename: Optional[str] = None
    # Replacement options (e.g. RAG inclusion / job-document flags).
    options: Optional[DocumentOptions] = None
    model_config = {
        "populate_by_name": True # Allow both field names and aliases
    }
@ -774,6 +784,8 @@ class ApiMessage(BaseModel):
"populate_by_name": True # Allow both field names and aliases
}
MOCK_UUID = str(uuid.uuid4())
class ChatMessageStreaming(ApiMessage):
status: ApiStatusType = ApiStatusType.STREAMING
type: ApiMessageType = ApiMessageType.TEXT
@ -785,6 +797,7 @@ class ApiActivityType(str, Enum):
SEARCHING = "searching" # Used when generating RAG information
THINKING = "thinking" # Used when determing if AI will use tools
GENERATING = "generating" # Used when AI is generating a response
CONVERTING = "converting" # Used when AI is generating a response
GENERATING_IMAGE = "generating_image" # Used when AI is generating an image
TOOLING = "tooling" # Used when AI is using tools
HEARTBEAT = "heartbeat" # Used for periodic updates
@ -813,6 +826,13 @@ class JobRequirementsMessage(ApiMessage):
description: str
requirements: Optional[JobRequirements]
class DocumentMessage(ApiMessage):
    """Streamed API message carrying an uploaded document's metadata
    (emitted by the document-upload SSE stream)."""
    type: ApiMessageType = ApiMessageType.JSON
    # The stored Document record for the uploaded file.
    document: Document = Field(..., alias="document")
    model_config = {
        "populate_by_name": True # Allow both field names and aliases
    }
class ChatMessageMetaData(BaseModel):
model: AIModelType = AIModelType.QWEN2_5
temperature: float = 0.7