Working on user management
parent 40d59042ef
commit 357b42ea7c
@@ -6,10 +6,14 @@ import {
  Divider,
  useTheme,
} from '@mui/material';
import DeleteIcon from '@mui/icons-material/Delete';
import { useMediaQuery } from '@mui/material';
import { Candidate, CandidateAI } from 'types/types';
import { CopyBubble } from "components/CopyBubble";
import { rest } from 'lodash';
import { AIBanner } from 'components/ui/AIBanner';
import { useAuth } from 'hooks/AuthContext';
import { DeleteConfirmation } from './DeleteConfirmation';

interface CandidateInfoProps {
  candidate: Candidate;
@@ -21,6 +25,7 @@ interface CandidateInfoProps {

const CandidateInfo: React.FC<CandidateInfoProps> = (props: CandidateInfoProps) => {
  const { candidate } = props;
  const { user, apiClient } = useAuth();
  const {
    sx,
    action = '',
@@ -29,11 +34,19 @@ const CandidateInfo: React.FC<CandidateInfoProps> = (props: CandidateInfoProps)
  } = props;
  const theme = useTheme();
  const isMobile = useMediaQuery(theme.breakpoints.down('md'));
  const ai: CandidateAI | null = ('isAI' in candidate) ? candidate as CandidateAI : null;
  const isAdmin = user?.isAdmin;

  const deleteCandidate = async (candidateId: string | undefined) => {
    if (candidateId) {
      await apiClient.deleteCandidate(candidateId);
    }
  }

  if (!candidate) {
    return <Box>No user loaded.</Box>;
  }
  console.log(candidate);

  return (
    <Card
      elevation={elevation}
@@ -47,7 +60,8 @@ const CandidateInfo: React.FC<CandidateInfoProps> = (props: CandidateInfoProps)
      }}
      {...rest}
    >
      <CardContent sx={{ flexGrow: 1, p: 3, height: '100%', display: 'flex', flexDirection: 'column', alignItems: 'stretch' }}>
      <CardContent sx={{ display: "flex", flexGrow: 1, p: 3, height: '100%', flexDirection: 'column', alignItems: 'stretch', position: "relative" }}>
        {ai && <AIBanner />}

        <Grid container spacing={2}>
          <Grid
@@ -129,6 +143,17 @@ const CandidateInfo: React.FC<CandidateInfoProps> = (props: CandidateInfoProps)
            }
          </>}
        </Grid>
        {isAdmin && ai &&
          <DeleteConfirmation
            onDelete={() => { deleteCandidate(candidate.id); }}
            sx={{ minWidth: 'auto', px: 2, maxHeight: "min-content", color: "red" }}
            action="delete"
            label="user"
            title="Delete AI user"
            icon=<DeleteIcon />
            message={`Are you sure you want to delete ${candidate.username}? This action cannot be undone.`}
          />}

      </Grid>
      </CardContent>
    </Card>

@@ -28,6 +28,7 @@ interface DeleteConfirmationProps {
  onConfirm?: () => void;
  title?: string;
  message?: string;
  icon?: React.ReactNode;

  // Optional props for button customization in controlled mode
  hideButton?: boolean;
@@ -56,7 +57,8 @@ const DeleteConfirmation = (props: DeleteConfirmationProps) => {
    hideButton = false,
    confirmButtonText,
    cancelButtonText = "Cancel",
    sx
    sx,
    icon = <ResetIcon />,
  } = props;

  // Internal state for uncontrolled mode
@@ -104,14 +106,14 @@ const DeleteConfirmation = (props: DeleteConfirmationProps) => {
      <span style={{ display: "flex" }}> {/* This span is used to wrap the IconButton to ensure Tooltip works even when disabled */}
        <IconButton
          aria-label={action}
          onClick={handleClickOpen}
          onClick={(e) => { e.stopPropagation(); e.preventDefault(); handleClickOpen(); }}
          color={color || "inherit"}
          sx={{ display: "flex", margin: 'auto 0px', ...sx }}
          size="large"
          edge="start"
          disabled={disabled}
        >
          <ResetIcon />
          {icon}
        </IconButton>
      </span>
    </Tooltip>

@@ -100,6 +100,7 @@ const getStyle = (theme: Theme, type: ChatMessageType): any => {
      color: theme.palette.text.primary,
      opacity: 0.95,
    },
    preparing: 'status',
    processing: 'status',
    qualifications: {
      ...defaultStyle,
@@ -158,7 +159,7 @@ const getStyle = (theme: Theme, type: ChatMessageType): any => {
  return styles[type];
}

const getIcon = (messageType: string): React.ReactNode | null => {
const getIcon = (messageType: ChatMessageType): React.ReactNode | null => {
  const icons: any = {
    error: <ErrorOutline color="error" />,
    generating: <LocationSearchingIcon />,
@@ -340,7 +341,7 @@ const MessageContainer = (props: MessageContainerProps) => {
      gap: 1,
      ...sx,
    }}>
      <Box sx={{ display: "flex", flexDirection: 'row' }}>
      <Box sx={{ display: "flex", flexDirection: 'row', alignItems: 'center', gap: 1 }}>
        {icon !== null && icon}
        {messageView}
      </Box>

frontend/src/components/ui/AIBanner.css (Normal file, 38 lines)
@@ -0,0 +1,38 @@
.aibanner-clipper {
  position: absolute;
  top: 0;
  width: 100%;
  height: 30px;
  overflow: visible;
  pointer-events: none;
  z-index: 1101;
  cursor: pointer;
  font-family: 'Roboto';
  line-height: 30px;
}

.aibanner-label:hover {
  opacity: 1;
}

.aibanner-label {
  width: 300px;
  position: absolute;
  display: flex;
  right: -70px;
  top: 40px;
  height: 32px;
  justify-content: center;
  align-items: center;
  transform: rotate(45deg);
  transform-origin: center center;
  font-size: 20px;
  text-align: center;
  font-weight: bold;
  color: #484848;
  background: rgba(94, 255, 0, 0.5);
  border: 1px solid rgb(86, 128, 23);
  z-index: 11;
  pointer-events: auto;
  opacity: 0.5;
}

frontend/src/components/ui/AIBanner.tsx (Normal file, 26 lines)
@@ -0,0 +1,26 @@
import React, { useRef } from 'react';
import Box from '@mui/material/Box';
import { SxProps } from '@mui/material/styles';

import './AIBanner.css';

type AIBannerProps = {
  sx?: SxProps;
}

const AIBanner: React.FC<AIBannerProps> = (props : AIBannerProps) => {
  const { sx = {} } = props;
  const aibannerRef = useRef<HTMLElement | null>(null);

  return (
    <Box sx={sx} className='aibanner-clipper'>
      <Box ref={aibannerRef} className='aibanner-label'>
        <Box>AI Generated</Box>
      </Box>
    </Box>
  );
};

export {
  AIBanner
};

@@ -5,7 +5,7 @@ import Box from '@mui/material/Box';

import { BackstoryPageProps } from '../components/BackstoryTab';
import { CandidateInfo } from 'components/CandidateInfo';
import { Candidate } from "../types/types";
import { Candidate, CandidateAI } from "../types/types";
import { useAuth } from 'hooks/AuthContext';
import { useSelectedCandidate } from 'hooks/GlobalContext';

@@ -21,6 +21,8 @@ import { StreamingResponse } from 'services/api-client';
import { ChatContext, ChatMessage, ChatMessageUser, ChatMessageBase, ChatSession, ChatQuery, Candidate, CandidateAI } from 'types/types';
import { useAuth } from 'hooks/AuthContext';
import { Message } from 'components/Message';
import { Types } from '@uiw/react-json-view';
import { assert } from 'console';

const emptyUser: CandidateAI = {
  userType: "candidate",
@@ -63,7 +65,7 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
  const [processing, setProcessing] = useState<boolean>(false);
  const [generatedUser, setGeneratedUser] = useState<CandidateAI | null>(null);
  const [prompt, setPrompt] = useState<string>('');
  const [resume, setResume] = useState<string>('');
  const [resume, setResume] = useState<string | null>(null);
  const [canGenImage, setCanGenImage] = useState<boolean>(false);
  const [timestamp, setTimestamp] = useState<number>(0);
  const [state, setState] = useState<number>(0); // Replaced stateRef
@@ -77,27 +79,30 @@ const GenerateCandidate = (props: BackstoryElementProps) => {

  /* Create the chat session */
  useEffect(() => {
    if (chatSession || loading) {
    if (chatSession || loading || !generatedUser) {
      return;
    }

    const createChatSession = async () => {
      console.log('Creating chat session');
      try {
        const chatContext: ChatContext = { type: "generate_persona" };
        const response: ChatSession = await apiClient.createChatSession(chatContext);
        const response: ChatSession = await apiClient.createCandidateChatSession(
          generatedUser.username,
          "generate_image",
          "Profile image generation"
        );
        setChatSession(response);
        console.log(`Chat session created for generate_persona`, response);
        setSnack(`Chat session created for generate_persona: ${response.id}`);
        console.log(`Chat session created for generate_image`, response);
        setSnack(`Chat session created for generate_image: ${response.id}`);
      } catch (e) {
        console.error(e);
        setSnack("Unable to create chat session.", "error");
        setSnack("Unable to create image generation session.", "error");
      }
    };

    setLoading(true);
    createChatSession().then(() => { setLoading(false) });
  }, [chatSession, loading, setChatSession, setLoading, setSnack]);
  }, [generatedUser, chatSession, loading, setChatSession, setLoading, setSnack]);

  const generatePersona = async (prompt: string) => {
    const userMessage: ChatMessageUser = {
@@ -113,13 +118,15 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
    setProcessingMessage({ ...defaultMessage, content: "Generating persona..." });
    try {
      const result = await apiClient.createCandidateAI(userMessage);
      console.log(result.message);
      console.log(result.message, result);
      setGeneratedUser(result.candidate);
      setResume(result.resume);
      setCanGenImage(true);
      setShouldGenerateProfile(true); // Reset the flag
    } catch (error) {
      console.error(error);
      setPrompt('');
      setResume(null);
      setProcessing(false);
      setProcessingMessage(null);
      setSnack("Unable to generate AI persona", "error");
@@ -147,127 +154,101 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
    onEnter(value);
  }, [onEnter]);

  // Effect to trigger profile generation when user data is ready
  // Effect to trigger profile image generation when user data is ready
  useEffect(() => {
    console.log("useEffect triggered - shouldGenerateProfile:", shouldGenerateProfile, "user:", generatedUser?.username, generatedUser?.firstName);
    if (shouldGenerateProfile && generatedUser?.username !== "[blank]" && generatedUser?.firstName !== "[blank]") {
      console.log("Triggering profile generation with updated user data:", generatedUser);
    if (!chatSession || !generatedUser?.username) {
      return;
    }
    const username = generatedUser.username;

    if (!shouldGenerateProfile || username === "[blank]" || generatedUser?.firstName === "[blank]") {
      return;
    }

    if (controllerRef.current) {
      console.log("Controller already active, skipping profile generation");
      return;
    }

    // Don't generate if we still have blank user data
    if (generatedUser?.username === "[blank]" || generatedUser?.firstName === "[blank]") {
      console.log("Cannot generate profile: user data not ready");
      return;
    }

    const imagePrompt = `A photorealistic profile picture of a ${generatedUser?.age} year old ${generatedUser?.gender?.toLocaleLowerCase()} ${generatedUser?.ethnicity?.toLocaleLowerCase()} person. ${prompt}`
    setProcessingMessage({ ...defaultMessage, content: 'Starting image generation...' });
    setProcessing(true);
    setCanGenImage(false);
    setState(3);
    const start = Date.now();

    // controllerRef.current = streamQueryResponse({
    //   query: {
    //     prompt: imagePrompt,
    //     agentOptions: {
    //       username: generatedUser?.username,
    //       filename: "profile.png"
    //     }
    //   },
    //   type: "image",
    //   sessionId,
    //   connectionBase,
    //   onComplete: (msg) => {
    //     // console.log("Profile generation response:", msg);
    //     switch (msg.status) {
    //       case "partial":
    //       case "done":
    //         if (msg.status === "done") {
    //           setProcessing(false);
    //           controllerRef.current = null;
    //           setState(0);
    //           setCanGenImage(true);
    //           setShouldGenerateProfile(false);
    //           setGeneratedUser({
    //             ...(generatedUser ? generatedUser : emptyUser),
    //             hasProfile: true
    //           });
    //         }
    //         break;
    //       case "error":
    //         console.log(`Error generating profile: ${msg.response} after ${Date.now() - start}`);
    //         setSnack(msg.response || "", "error");
    //         setProcessing(false);
    //         controllerRef.current = null;
    //         setState(0);
    //         setCanGenImage(true);
    //         setShouldGenerateProfile(false);
    //         break;
    //       default:
    //         let data: any = {};
    //         try {
    //           data = typeof msg.response === 'string' ? JSON.parse(msg.response) : msg.response;
    //         } catch (e) {
    //           data = { message: msg.response };
    //         }
    //         if (msg.status !== "heartbeat") {
    //           console.log(data);
    //         }
    //         if (data.timestamp) {
    //           setTimestamp(data.timestamp);
    //         } else {
    //           setTimestamp(Date.now())
    //         }
    //         if (data.message) {
    //           setStatus(data.message);
    //         }
    //         break;
    //     }
    //   }
    // });
    const chatMessage: ChatMessageUser = {
      sessionId: chatSession.id || '',
      status: "done",
      type: "user",
      sender: "user",
      timestamp: new Date(),
      content: prompt
    };

    controllerRef.current = apiClient.sendMessageStream(chatMessage, {
      onMessage: async (msg: ChatMessage) => {
        console.log(`onMessage: ${msg.type} ${msg.content}`, msg);
        if (msg.type === "heartbeat") {
          const heartbeat = JSON.parse(msg.content);
          setTimestamp(heartbeat.timestamp);
        }
  }, [shouldGenerateProfile, generatedUser, prompt, setSnack]);

  // Handle streaming updates based on current state
  useEffect(() => {
    const content = streamingMessage?.content.trim();
    if (!content) {
      return;
        if (msg.type === "thinking") {
          const status = JSON.parse(msg.content);
          setProcessingMessage({ ...defaultMessage, content: status.message });
        }


        if (msg.type === "response") {
          controllerRef.current = null;
          try {
    switch (state) {
      case 0: /* Generating persona */
        const partialUser = { ...emptyUser, ...JSON.parse(jsonrepair(content)) };
        if (!partialUser.fullName) {
          partialUser.fullName = `${partialUser.firstName} ${partialUser.lastName}`;
            await apiClient.updateCandidate(generatedUser.id || '', { profileImage: "profile.png" });
            const { success, message } = await apiClient.deleteChatSession(chatSession.id || '');
            console.log(`Profile generated for ${username} and chat session was ${!success ? 'not ' : ''} deleted: ${message}}`);
            setGeneratedUser({
              ...generatedUser,
              profileImage: "profile.png"
            } as CandidateAI);
            setState(0);
            setCanGenImage(true);
            setShouldGenerateProfile(false);
          } catch (error) {
            console.error(error);
            setSnack(`Unable to update ${username} to indicate they have a profile picture.`, "error");
          }
        setGeneratedUser(partialUser);
        break;
      case 1: /* Generating resume */
        setResume(content);
        break;
      case 3: /* RAG streaming */
        break;
      case 4: /* Image streaming */
        break;
    }
  } catch {
    // Ignore JSON parsing errors during streaming
      },
      onError: (error) => {
        console.log("onError:", error);
        // Type-guard to determine if this is a ChatMessageBase or a string
        if (typeof error === "object" && error !== null && "content" in error) {
          setSnack(error.content || "Unknown error generating profile image", "error");
        } else {
          setSnack(error as string, "error");
        }
  }, [streaming, state]);
        setProcessingMessage(null);
        setStreaming(false);
        setProcessing(false);
        controllerRef.current = null;
        setState(0);
        setCanGenImage(true);
        setShouldGenerateProfile(false);
      },
      onComplete: () => {
        setProcessingMessage(null);
        setStreaming(false);
        setProcessing(false);
        controllerRef.current = null;
        setState(0);
        setCanGenImage(true);
        setShouldGenerateProfile(false);
      },
      onStatusChange: (status: string) => {
        console.log(`onStatusChange: ${status}`);
      },
    });
  }, [chatSession, shouldGenerateProfile, generatedUser, prompt, setSnack]);

  if (!user?.isAdmin) {
    return (<Box>You must be logged in as an admin to generate AI candidates.</Box>);
  }
  if (!chatSession) {
    return (<></>);
  }

  return (
    <Box className="GenerateCandidate" sx={{
      display: "flex",
@@ -291,7 +272,7 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
      justifyContent: "center",
      m: 2,
    }}>
      {processingMessage && <Message message={processingMessage} {...{ chatSession, submitQuery, setSnack }} />}
      {processingMessage && chatSession && <Message message={processingMessage} {...{ chatSession, submitQuery, setSnack }} />}
      <PropagateLoader
        size="10px"
        loading={processing}
@@ -334,12 +315,13 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
        </Tooltip>
      </Box>
    </Box>
    { resume !== '' &&
    {resume &&
      <Paper sx={{pt: 1, pb: 1, pl: 2, pr: 2}}>
        <Scrollable sx={{flexGrow: 1}}>
          <StyledMarkdown {...{ content: resume, setSnack, submitQuery }} />
        </Scrollable>
      </Paper> }
      </Paper>
    }
    <BackstoryTextField
      style={{ flexGrow: 0, flexShrink: 1 }}
      ref={backstoryTextRef}

@@ -48,6 +48,11 @@ interface StreamingOptions {
  signal?: AbortSignal;
}

interface DeleteCandidateResponse {
  success: boolean;
  message: string;
}

interface StreamingResponse {
  messageId: string;
  cancel: () => void;
@@ -57,6 +62,7 @@ interface StreamingResponse {
interface CreateCandidateAIResponse {
  message: string;
  candidate: Types.CandidateAI;
  resume: string;
};

@@ -209,7 +215,8 @@ class ApiClient {
    const result = await handleApiResponse<CreateCandidateAIResponse>(response);
    return {
      message: result.message,
      candidate: convertFromApi<Types.CandidateAI>(result.candidate, "CandidateAI")
      candidate: convertFromApi<Types.CandidateAI>(result.candidate, "CandidateAI"),
      resume: result.resume
    };
  }

@@ -518,6 +525,16 @@ class ApiClient {
    return this.handleApiResponseWithConversion<Types.Candidate>(response, 'Candidate');
  }

  async deleteCandidate(id: string): Promise<DeleteCandidateResponse> {
    const response = await fetch(`${this.baseUrl}/candidates/${id}`, {
      method: 'PATCH',
      headers: this.defaultHeaders,
      body: JSON.stringify({ id })
    });

    return handleApiResponse<DeleteCandidateResponse>(response);
  }

  async uploadCandidateProfile(file: File): Promise<boolean> {
    const formData = new FormData()
    formData.append('file', file);
@@ -970,7 +987,7 @@ class ApiClient {
    let buffer = '';
    let incomingMessage: Types.ChatMessage | null = null;
    const incomingMessageList: Types.ChatMessage[] = [];

    let incomingStatus : Types.ChatStatusType | null = null;
    try {
      while (true) {
        const { done, value } = await reader.read();
@@ -998,8 +1015,9 @@ class ApiClient {
        const convertedIncoming = convertChatMessageFromApi(incoming);

        // Trigger callbacks based on status
        if (convertedIncoming.status !== incomingMessage?.status) {
        if (convertedIncoming.status !== incomingStatus) {
          options.onStatusChange?.(convertedIncoming.status);
          incomingStatus = convertedIncoming.status;
        }

        // Handle different status types
@@ -1287,381 +1305,5 @@ export interface PendingVerification {
  attempts: number;
}

// ============================
// Usage Examples
// ============================

/*
// Registration with email verification
const apiClient = new ApiClient();

try {
  const result = await apiClient.createCandidateWithVerification({
    email: 'user@example.com',
    username: 'johndoe',
    password: 'SecurePassword123!',
    firstName: 'John',
    lastName: 'Doe',
    phone: '+1234567890'
  });

  console.log(result.message); // "Registration successful! Please check your email..."

  // Set pending verification status
  apiClient.setPendingEmailVerification(result.email);

  // Show success dialog to user
  showRegistrationSuccessDialog(result);

} catch (error) {
  console.error('Registration failed:', error);
}

// login with MFA support
try {
  const loginResult = await apiClient.login('user@example.com', 'password');

  if ('mfaRequired' in loginResult && loginResult.mfaRequired) {
    // Show MFA dialog
    showMFADialog({
      email: 'user@example.com',
      deviceId: loginResult.deviceId!,
      deviceName: loginResult.message || 'Unknown device'
    });
  } else {
    // Normal login success
    const authData = loginResult as Types.AuthResponse;
    handleLoginSuccess(authData);
  }
} catch (error) {
  console.error('Login failed:', error);
}

// Email verification
try {
  const verificationResult = await apiClient.verifyEmail({
    token: 'verification-token-from-email'
  });

  console.log(verificationResult.message); // "Email verified successfully!"

  // Clear pending verification
  apiClient.setPendingEmailVerification('', false);

  // Redirect to login
  window.location.href = '/login';

} catch (error) {
  console.error('Email verification failed:', error);
}

// MFA verification
try {
  const mfaResult = await apiClient.verifyMFA({
    email: 'user@example.com',
    code: '123456',
    deviceId: 'device-fingerprint',
    rememberDevice: true
  });

  // Handle successful login
  handleLoginSuccess(mfaResult);

} catch (error) {
  console.error('MFA verification failed:', error);
}

// Device management
try {
  const devices = await apiClient.getTrustedDevices();

  devices.forEach(device => {
    console.log(`Device: ${device.deviceName}, Last used: ${device.lastUsed}`);
  });

  // Remove a device
  await apiClient.removeTrustedDevice('device-id-to-remove');

} catch (error) {
  console.error('Device management failed:', error);
}

// Security log
try {
  const securityEvents = await apiClient.getSecurityLog(30); // Last 30 days

  securityEvents.forEach(event => {
    console.log(`${event.timestamp}: ${event.eventType} from ${event.details.deviceName}`);
  });

} catch (error) {
  console.error('Failed to load security log:', error);
}
*/

// ============================
// React Hooks for Streaming with Date Conversion
// ============================

/* React Hook Examples for Streaming Chat with proper date handling
import { useState, useEffect, useCallback, useRef } from 'react';

export function useStreamingChat(sessionId: string) {
  const [messages, setMessages] = useState<Types.ChatMessage[]>([]);
  const [currentMessage, setCurrentMessage] = useState<Types.ChatMessage | null>(null);
  const [isStreaming, setIsStreaming] = useState(false);
  const [error, setError] = useState<string | null>(null);

  const apiClient = useApiClient();
  const streamingRef = useRef<StreamingResponse | null>(null);

  const sendMessage = useCallback(async (query: Types.ChatQuery) => {
    setError(null);
    setIsStreaming(true);
    setCurrentMessage(null);

    const streamingOptions: StreamingOptions = {
      onMessage: (message) => {
        // Message already has proper Date objects from conversion
        setCurrentMessage(message);
      },
      onStreaming: (chunk) => {
        // Chunk also has proper Date objects
        setCurrentMessage(prev => prev ?
          {
            ...prev,
            content: prev.content + chunk.content,
            timestamp: chunk.timestamp // Update to latest timestamp
          } :
          {
            id: chunk.id || '',
            sessionId,
            status: 'streaming',
            sender: 'ai',
            content: chunk.content,
            timestamp: chunk.timestamp // Already a Date object
          }
        );
      },
      onStatusChange: (status) => {
        setCurrentMessage(prev => prev ? { ...prev, status } : null);
      },
      onComplete: () => {
        if (currentMessage) {
          setMessages(prev => [...prev, currentMessage]);
        }
        setCurrentMessage(null);
        setIsStreaming(false);
      },
      onError: (err) => {
        setError(typeof err === 'string' ? err : err.content);
        setIsStreaming(false);
        setCurrentMessage(null);
      }
    };

    try {
      streamingRef.current = apiClient.sendMessageStream(sessionId, query, streamingOptions);
      await streamingRef.current.promise;
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Failed to send message');
      setIsStreaming(false);
    }
  }, [sessionId, apiClient, currentMessage]);

  const cancelStreaming = useCallback(() => {
    if (streamingRef.current) {
      streamingRef.current.cancel();
      setIsStreaming(false);
      setCurrentMessage(null);
    }
  }, []);

  return {
    messages,
    currentMessage,
    isStreaming,
    error,
    sendMessage,
    cancelStreaming
  };
}

// Usage in React component with proper date handling:
function ChatInterface({ sessionId }: { sessionId: string }) {
  const {
    messages,
    currentMessage,
    isStreaming,
    error,
    sendMessage,
    cancelStreaming
  } = useStreamingChat(sessionId);

  const handleSendMessage = (text: string) => {
    sendMessage(text);
  };

  return (
    <div>
      <div className="messages">
        {messages.map(message => (
          <div key={message.id}>
            <div className="message-header">
              <strong>{message.sender}:</strong>
              <span className="timestamp">
                {message.timestamp.toLocaleTimeString()}
              </span>
            </div>
            <div className="message-content">{message.content}</div>
          </div>
        ))}

        {currentMessage && (
          <div className="current-message">
            <div className="message-header">
              <strong>{currentMessage.sender}:</strong>
              <span className="timestamp">
                {currentMessage.timestamp.toLocaleTimeString()}
              </span>
              {isStreaming && <span className="streaming-indicator">...</span>}
            </div>
            <div className="message-content">{currentMessage.content}</div>
          </div>
        )}
      </div>

      {error && <div className="error">{error}</div>}

      <div className="input-area">
        <input
          type="text"
          onKeyPress={(e) => {
            if (e.key === 'Enter') {
              handleSendMessage(e.currentTarget.value);
              e.currentTarget.value = '';
            }
          }}
          disabled={isStreaming}
        />
        {isStreaming && (
          <button onClick={cancelStreaming}>Cancel</button>
        )}
      </div>
    </div>
  );
}
*/

// ============================
// Usage Examples with Date Conversion
// ============================

/*
// Initialize API client
const apiClient = new ApiClient();

// All returned objects now have proper Date fields automatically!

// Create a candidate - createdAt, updatedAt, lastLogin are Date objects
try {
  const candidate = await apiClient.createCandidate({
    email: 'jane@example.com',
    username: 'jane_doe',
    password: 'SecurePassword123!',
    firstName: 'Jane',
    lastName: 'Doe'
  });

  // These are now Date objects, not strings!
  console.log('Created at:', candidate.createdAt.toLocaleDateString());
  console.log('Profile created on:', candidate.createdAt.toDateString());

  if (candidate.lastLogin) {
    console.log('Last seen:', candidate.lastLogin.toRelativeTimeString());
  }
} catch (error) {
  console.error('Failed to create candidate:', error);
}

// Get jobs with proper date conversion
try {
  const jobs = await apiClient.getJobs({ limit: 10 });

  jobs.data.forEach(job => {
    // datePosted, applicationDeadline, featuredUntil are Date objects
    console.log(`${job.title} - Posted: ${job.datePosted.toLocaleDateString()}`);

    if (job.applicationDeadline) {
      const daysRemaining = Math.ceil(
        (job.applicationDeadline.getTime() - new Date().getTime()) / (1000 * 60 * 60 * 24)
      );
      console.log(`Deadline in ${daysRemaining} days`);
    }
  });
} catch (error) {
  console.error('Failed to fetch jobs:', error);
}

// Update and delete chat sessions with proper date handling
try {
  // Update a session title
  const updatedSession = await apiClient.updateChatSession('session-id', {
    title: 'New Session Title',
    isArchived: false
  });

  console.log('Updated session:', updatedSession.title);
  console.log('Last activity:', updatedSession.lastActivity.toLocaleString());

  // Delete a session
  const deleteResult = await apiClient.deleteChatSession('session-id');
  console.log('Delete result:', deleteResult.message);
} catch (error) {
  console.error('Failed to manage session:', error);
}

// Streaming with proper date conversion
const streamResponse = apiClient.sendMessageStream(sessionId, 'Tell me about job opportunities', {
  onStreaming: (chunk) => {
    // chunk.timestamp is a Date object
    console.log(`Streaming at ${chunk.timestamp.toLocaleTimeString()}:`, chunk.content);
  },
  onMessage: (message) => {
    // message.timestamp is a Date object
    console.log(`Final message at ${message.timestamp.toLocaleTimeString()}:`, message.content);
  },
  onComplete: () => {
    console.log('Streaming completed');
  }
});

// Chat sessions with date conversion
try {
  const chatSession = await apiClient.createChatSession({
    type: 'job_search',
    additionalContext: {}
  });

  // createdAt and lastActivity are Date objects
  console.log('Session created:', chatSession.createdAt.toISOString());
  console.log('Last activity:', chatSession.lastActivity.toLocaleDateString());
} catch (error) {
  console.error('Failed to create chat session:', error);
}

// Get chat messages with date conversion
try {
  const messages = await apiClient.getChatMessages(sessionId);

  messages.data.forEach(message => {
    // timestamp is a Date object
    console.log(`[${message.timestamp.toLocaleString()}] ${message.sender}: ${message.content}`);
  });
} catch (error) {
  console.error('Failed to fetch messages:', error);
}
*/

export { ApiClient }
export type { StreamingOptions, StreamingResponse }

@@ -1,6 +1,6 @@
// Generated TypeScript types from Pydantic models
// Source: src/backend/models.py
// Generated on: 2025-06-03T15:05:33.759564
// Generated on: 2025-06-03T18:51:32.304683
// DO NOT EDIT MANUALLY - This file is auto-generated

// ============================
@@ -13,9 +13,9 @@ export type ActivityType = "login" | "search" | "view_job" | "apply_job" | "mess

export type ApplicationStatus = "applied" | "reviewing" | "interview" | "offer" | "rejected" | "accepted" | "withdrawn";

export type ChatContextType = "job_search" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "rag_search";
export type ChatContextType = "job_search" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "generate_image" | "rag_search";

export type ChatMessageType = "error" | "generating" | "info" | "preparing" | "processing" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";
export type ChatMessageType = "error" | "generating" | "info" | "preparing" | "processing" | "heartbeat" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";

export type ChatSenderType = "user" | "assistant" | "system";

@@ -272,7 +272,7 @@ export interface Certification {
}

export interface ChatContext {
  type: "job_search" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "rag_search";
  type: "job_search" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "generate_image" | "rag_search";
  relatedEntityId?: string;
  relatedEntityType?: "job" | "candidate" | "employer";
  additionalContext?: Record<string, any>;
@@ -283,7 +283,7 @@ export interface ChatMessage {
  sessionId: string;
  senderId?: string;
  status: "initializing" | "streaming" | "done" | "error";
  type: "error" | "generating" | "info" | "preparing" | "processing" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";
  type: "error" | "generating" | "info" | "preparing" | "processing" | "heartbeat" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";
  sender: "user" | "assistant" | "system";
  timestamp: Date;
  tunables?: Tunables;
@@ -296,7 +296,7 @@ export interface ChatMessageBase {
  sessionId: string;
  senderId?: string;
  status: "initializing" | "streaming" | "done" | "error";
  type: "error" | "generating" | "info" | "preparing" | "processing" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";
  type: "error" | "generating" | "info" | "preparing" | "processing" | "heartbeat" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";
  sender: "user" | "assistant" | "system";
  timestamp: Date;
  tunables?: Tunables;

src/backend/agents/generate_image.py (Normal file, 106 lines)
@@ -0,0 +1,106 @@
from __future__ import annotations
from datetime import UTC, datetime
from pydantic import model_validator, Field, BaseModel # type: ignore
from typing import (
    Dict,
    Literal,
    ClassVar,
    cast,
    Any,
    AsyncGenerator,
    List,
    Optional
    # override
) # NOTE: You must import Optional for late binding to work
import inspect
import random
import re
import json
import traceback
import asyncio
import time
import asyncio
import time
import os
import hashlib

from .base import Agent, agent_registry, LLMMessage
from models import Candidate, ChatMessage, ChatMessageBase, ChatMessageMetaData, ChatMessageType, ChatMessageUser, ChatOptions, ChatSenderType, ChatStatusType
import model_cast
from logger import logger
import defines

from image_generator.image_model_cache import ImageModelCache
from image_generator.profile_image import generate_image, ImageRequest

seed = int(time.time())
random.seed(seed)

class ImageGenerator(Agent):
    agent_type: Literal["generate_image"] = "generate_image" # type: ignore
    _agent_type: ClassVar[str] = agent_type # Add this for registration
    agent_persist: bool = False

    system_prompt: str = "" # No system prompt is used

    async def generate(
        self, llm: Any, model: str, user_message: ChatMessageUser, user: Candidate, temperature=0.7
    ) -> AsyncGenerator[ChatMessage, None]:
        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")

        file_path = os.path.join(defines.user_dir, user.username, "profile.png")
        chat_message = ChatMessage(
            session_id=user_message.session_id,
            tunables=user_message.tunables,
            status=ChatStatusType.INITIALIZING,
            type=ChatMessageType.PREPARING,
            sender=ChatSenderType.ASSISTANT,
            content="",
            timestamp=datetime.now(UTC)
        )

        chat_message.metadata = ChatMessageMetaData()
        try:
            #
            # Generate the profile picture
            #
            chat_message.content = f"Generating: {user_message.content}"
            yield chat_message

            logger.info(f"Image generation: {file_path} <- {user_message.content}")
            request = ImageRequest(filepath=file_path, prompt=user_message.content, iterations=4, height=256, width=256, guidance_scale=7.5)
            generated_message = None
            async for generated_message in generate_image(
                user_message=user_message,
                request=request
            ):
                if generated_message.status != "done":
                    yield generated_message

            if generated_message is None:
                chat_message.status = ChatStatusType.ERROR
                chat_message.content = "Image generation failed."
                yield chat_message
                return

            logger.info("Image generation done...")

            user.profile_image = "profile.png"

            # Image generated
            generated_message.status = ChatStatusType.DONE
            generated_message.content = f"{defines.api_prefix}/profile/{user.username}"
            yield generated_message
            return

        except Exception as e:
            chat_message.status = ChatStatusType.ERROR
            logger.error(traceback.format_exc())
            logger.error(chat_message.content)
            chat_message.content = f"Error in image generation: {str(e)}"
            logger.error(chat_message.content)
            yield chat_message
            return

# Register the base agent
agent_registry.register(ImageGenerator._agent_type, ImageGenerator)

@@ -70,7 +70,8 @@ You will be provided with defaults to use if not specified by the user:

Additional information provided in the user message can override those defaults.

You need to randomly assign an English username (can include numbers), a first name, last name, and a two English sentence description of that individual's work given the demographics provided.
You need to randomly assign an English username (can include numbers), a first name, last name, and a two English sentence description of
that individual's work given the demographics provided.

Your response must be in JSON.
Provide only the JSON response, and match the field names EXACTLY.
@@ -89,7 +90,8 @@ Provide all information in English ONLY, with no other commentary:

Make sure to provide a username and that the field name for the job description is "description".

DO NOT infer, imply, abbreviate, or state the ethnicity or age in the username or description. You are providing those only for use later by the system when casting individuals for the role.
DO NOT infer, imply, abbreviate, or state the ethnicity, gender, or age in the username or description.
You are providing those only for use later by the system when casting individuals for the role.
"""

generate_resume_system_prompt = """
@@ -381,6 +383,7 @@ class GeneratePersona(Agent):
    ):
        self.randomize()

        status_message = ChatMessage(session_id=user_message.session_id)
        original_prompt = user_message.content

        user_message.content = f"""\
@@ -401,10 +404,10 @@ class GeneratePersona(Agent):
Incorporate the following into the job description: {original_prompt}
"""


        #
        # Generate the persona
        #
        logger.info(f"🤖 Generating persona for {self.full_name}")
        generating_message = None
        async for generating_message in self.call_llm(
            llm=llm, model=model,
@@ -416,9 +419,6 @@ Incorporate the following into the job description: {original_prompt}
                logger.error(f"Error generating persona: {generating_message.content}")
                raise Exception(generating_message.content)

            if generating_message.status != ChatStatusType.DONE:
                yield generating_message

        if not generating_message:
            raise Exception("No response from LLM during persona generation")

@@ -475,111 +475,68 @@ Incorporate the following into the job description: {original_prompt}
            yield generating_message
            return

        logger.info(f"✅ Persona for {persona['username']} generated successfully")

        # Persona generated
        generating_message.content = json.dumps(persona)
        generating_message.status = ChatStatusType.DONE
        generating_message.type = ChatMessageType.RESPONSE
        status_message = ChatMessage(
            session_id=user_message.session_id,
            status = ChatStatusType.STATUS,
            type = ChatMessageType.RESPONSE,
            content = json.dumps(persona)
        )
        yield status_message

        yield generating_message
        #
        # Generate the resume
        #
        status_message = ChatMessage(
            session_id=user_message.session_id,
            status = ChatStatusType.STATUS,
            type = ChatMessageType.THINKING,
            content = f"Generating resume for {persona['full_name']}..."
        )
        logger.info(f"🤖 {status_message.content}")
        yield status_message

        # #
        # # Generate the resume
        # #
        # message.status = "thinking"
        # message.response = f"Generating resume for {persona['full_name']}..."
        # yield message
        user_message.content = f"""
```json
{{
    "full_name": "{persona["full_name"]}",
    "location": "{persona["location"]}",
    "age": {persona["age"]},
    "description": {persona["description"]},
    "title": {persona["title"]},
    "email": {persona["email"]},
    "phone": {persona["phone"]}
}}
```
"""
        if original_prompt:
            user_message.content += f"""
Make sure at least one of the candidate's job descriptions take into account the following: {original_prompt}."""

        # prompt = f"""
        # ```json
        # {{
        #     "full_name": "{persona["full_name"]}",
        #     "location": "{persona["location"]}",
        #     "age": {persona["age"]},
        #     "description": {persona["description"]},
        #     "title": {persona["title"]},
        #     "email": {persona["email"]},
        #     "phone": {persona["phone"]}
        # }}
        # ```
        # """
        # if original_prompt:
        #     prompt += f"""
        # Make sure at least one of the candidate's job descriptions take into account the following: {original_prompt}."""
        # try:
        #     async for message in self.call_llm(
        #         message=message, system_prompt=generate_resume_system_prompt, prompt=prompt
        #     ):
        #         if message.status != "done":
        #             yield message
        #         if message.status == "error":
        #             raise Exception(message.response)
        async for generating_message in self.call_llm(
            llm=llm, model=model,
            user_message=user_message,
            system_prompt=generate_resume_system_prompt,
            temperature=temperature,
        ):
            if generating_message.status == ChatStatusType.ERROR:
                logger.error(f"❌ Error generating resume: {generating_message.content}")
                raise Exception(generating_message.content)

        # except Exception as e:
        #     message.response = f"Unable to parse LLM returned content: {json_str} {str(e)}"
        #     message.status = "error"
        #     logger.error(traceback.format_exc())
        #     logger.error(message.response)
        #     yield message
        #     return
        if not generating_message:
            raise Exception("No response from LLM during persona generation")

        # resume = self.extract_markdown_from_text(message.response)
        # if resume:
        #     user_resume_dir = os.path.join(defines.user_dir, persona["username"], defines.resume_doc_dir)
        #     os.makedirs(user_resume_dir, exist_ok=True)
        #     user_resume_file = os.path.join(user_resume_dir, defines.resume_doc)
        #     with open(user_resume_file, "w") as f:
        #         f.write(resume)

        # # Resume generated
        # message.response = resume
        # message.status = "partial"
        # yield message

        # #
        # # Generate RAG database
        # #
        # message.status = "thinking"
        # message.response = f"Generating RAG content from resume..."
        # yield message

        # # Prior to instancing a new User, the json data has to be created
        # # so the system can process it
        # user_dir = os.path.join(defines.user_dir, persona["username"])
        # os.makedirs(user_dir, exist_ok=True)
        # user_info = os.path.join(user_dir, "info.json")
        # with open(user_info, "w") as f:
        #     f.write(json.dumps(persona, indent=2))

        # user = User(llm=self.llm, username=self.username)
        # await user.initialize()
        # await user.file_watcher.initialize_collection()
        # # RAG content generated
        # message.response = f"{user.file_watcher.collection.count()} entries created in RAG vector store."

        # #
        # # Write out the completed user information
        # #
        # with open(user_info, "w") as f:
        #     f.write(json.dumps(persona, indent=2))

        # # Image generated
        # message.status = "done"
        # message.response = json.dumps(persona)

        # except Exception as e:
        #     message.status = "error"
        #     logger.error(traceback.format_exc())
        #     logger.error(message.response)
        #     message.response = f"Error in persona generation: {str(e)}"
        #     logger.error(message.response)
        #     yield message
        #     return

        # # Done processing, add message to conversation
        # self.context.processing = False
        # # Return the final message
        # yield message
        # return
        resume = self.extract_markdown_from_text(generating_message.content)
        status_message = ChatMessage(
            session_id=user_message.session_id,
            status=ChatStatusType.DONE,
            type=ChatMessageType.RESPONSE,
            content=resume
        )
        yield status_message
        return

    def extract_json_from_text(self, text: str) -> str:
        """Extract JSON string from text that may contain other content."""

@@ -60,6 +60,38 @@ class EntityManager:

        return entity

    async def remove_entity(self, candidate_id: str) -> bool:
        """
        Immediately remove and cleanup a candidate entity from active persistence.
        This should be called when a candidate is being deleted from the system.

        Args:
            candidate_id: The ID of the candidate entity to remove

        Returns:
            bool: True if entity was found and removed, False if not found
        """
        try:
            # Check if entity exists
            entity = self._entities.get(candidate_id)
            if not entity:
                print(f"Entity {candidate_id} not found in active persistence")
                return False

            # Remove from tracking dictionaries
            self._entities.pop(candidate_id, None)
            self._weak_refs.pop(candidate_id, None)

            # Cleanup the entity
            await entity.cleanup()

            print(f"Successfully removed entity {candidate_id} from active persistence")
            return True

        except Exception as e:
            print(f"Error removing entity {candidate_id}: {e}")
            return False

    def _on_entity_deleted(self, user_id: str):
        """Callback when entity is garbage collected"""
        def cleanup_callback(weak_ref):

src/backend/image_generator/image_model_cache.py (Normal file, 101 lines)
@@ -0,0 +1,101 @@
import asyncio
import gc
import re
import time
from typing import Any

import torch # type: ignore
from diffusers import StableDiffusionPipeline, FluxPipeline # type: ignore

class ImageModelCache: # Stay loaded for 3 hours
    def __init__(self, timeout_seconds: float = 3 * 60 * 60):
        self._pipe = None
        self._model_name = None
        self._device = None
        self._last_access_time = 0
        self._timeout_seconds = timeout_seconds
        self._lock = asyncio.Lock()
        self._cleanup_task = None

    async def start(self):
        if self._cleanup_task is None:
            self._cleanup_task = asyncio.create_task(self._periodic_cleanup())

    def _get_model_type(self, model_name: str) -> str:
        if re.search(r"stable-diffusion", model_name, re.IGNORECASE):
            return "stable"
        return "flux"

    async def get_pipeline(self, model: str, device: str) -> Any:
        await self.start() # Ensure cleanup task starts on first use

        async with self._lock:
            current_time = time.time()

            current_model_type = self._get_model_type(model)
            cached_model_type = self._get_model_type(self._model_name) if self._model_name else None

            if (
                self._pipe is not None and
                self._model_name == model and
                self._device == device and
                current_model_type == cached_model_type and
                current_time - self._last_access_time < self._timeout_seconds
            ):
                self._last_access_time = current_time
                return self._pipe

            await self._unload_model()

            if current_model_type == "stable":
                pipe = StableDiffusionPipeline.from_pretrained(
                    model,
                    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
                )
                def dummy_safety_checker(images, clip_input):
                    return images, [False] * len(images)
                pipe.safety_checker = dummy_safety_checker
            else:
                pipe = FluxPipeline.from_pretrained(
                    model,
                    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
                )
                try:
                    pipe.load_lora_weights('enhanceaiteam/Flux-uncensored', weight_name='lora.safetensors')
                except Exception as e:
                    raise Exception(f"Failed to load LoRA weights: {str(e)}")

            pipe = pipe.to(device)
            self._pipe = pipe
            self._model_name = model
            self._device = device
            self._last_access_time = current_time
            return pipe

    async def _unload_model(self):
        if self._pipe is not None:
            try:
                del self._pipe
                gc.collect()
                if self._device == "cuda":
                    torch.cuda.empty_cache()
                elif self._device == "xpu":
                    torch.xpu.empty_cache()
            except Exception:
                pass
            self._pipe = None
            self._model_name = None
            self._device = None

    async def cleanup_if_expired(self):
        async with self._lock:
            if (
                self._pipe is not None and
                time.time() - self._last_access_time >= self._timeout_seconds
            ):
                await self._unload_model()

    async def _periodic_cleanup(self):
        while True:
            await asyncio.sleep(self._timeout_seconds)
            await self.cleanup_if_expired()

294
src/backend/image_generator/profile_image.py
Normal file
294
src/backend/image_generator/profile_image.py
Normal file
@ -0,0 +1,294 @@
|
||||
from __future__ import annotations
|
||||
from datetime import UTC, datetime
|
||||
from pydantic import BaseModel, Field # type: ignore
|
||||
from typing import Dict, Literal, Any, AsyncGenerator, Optional
|
||||
import inspect
|
||||
import random
|
||||
import re
|
||||
import json
|
||||
import traceback
|
||||
import asyncio
|
||||
import time
|
||||
import os
|
||||
import gc
|
||||
import tempfile
|
||||
import uuid
|
||||
import torch # type: ignore
|
||||
import asyncio
|
||||
import time
|
||||
import json
|
||||
from typing import AsyncGenerator
|
||||
from threading import Thread
|
||||
import queue
|
||||
import uuid
|
||||
|
||||
from .image_model_cache import ImageModelCache
|
||||
|
||||
from models import Candidate, ChatMessage, ChatMessageBase, ChatMessageMetaData, ChatMessageType, ChatMessageUser, ChatOptions, ChatSenderType, ChatStatusType
|
||||
from logger import logger
|
||||
|
||||
from image_generator.image_model_cache import ImageModelCache
|
||||
|
||||
# Heuristic time estimates (in seconds) for different models and devices at 512x512
|
||||
TIME_ESTIMATES = {
|
||||
"flux": {
|
||||
"cuda": {"load": 10, "per_step": 0.8},
|
||||
"xpu": {"load": 15, "per_step": 1.0},
|
||||
"cpu": {"load": 30, "per_step": 10.0},
|
||||
}
|
||||
}
|
||||
|
||||
class ImageRequest(BaseModel):
|
||||
filepath: str
|
||||
prompt: str
|
||||
model: str = "black-forest-labs/FLUX.1-schnell"
|
||||
iterations: int = 4
|
||||
height: int = 256
|
||||
width: int = 256
|
||||
guidance_scale: float = 7.5
|
||||
|
||||
# Global model cache instance
|
||||
model_cache = ImageModelCache()
|
||||
def flux_worker(pipe: Any, params: ImageRequest, status_queue: queue.Queue, task_id: str):
    """Background worker for Flux image generation"""
    try:
        # Rough per-step heuristic; see TIME_ESTIMATES for device-specific values
        estimates = {"per_step": 0.5}
        resolution_scale = (params.height * params.width) / (512 * 512)

        # Flux: run generation in the background and push progress updates
        estimated_gen_time = estimates["per_step"] * params.iterations * resolution_scale
        status_queue.put({
            "status": "running",
            "message": "Initializing image generation...",
            "estimated_time_remaining": estimated_gen_time,
            "progress": 0
        })

        # Start the generation task
        start_gen_time = time.time()

        # Step callback invoked by the pipeline to report per-step progress
        def status_callback(pipeline, step, timestep, callback_kwargs):
            progress = int((step + 1) / params.iterations * 100)
            status_queue.put({
                "status": "running",
                "message": f"Processing step {step+1}/{params.iterations} ({progress}%) complete.",
                "progress": progress
            })
            return callback_kwargs

        # Run the Flux pipeline, reporting progress via the step callback
        image = pipe(
            params.prompt,
            num_inference_steps=params.iterations,
            guidance_scale=params.guidance_scale,
            height=params.height,
            width=params.width,
            callback_on_step_end=status_callback,
        ).images[0]

        gen_time = time.time() - start_gen_time
        per_step_time = gen_time / params.iterations if params.iterations > 0 else gen_time

        logger.info(f"Saving to {params.filepath}")
        image.save(params.filepath)

        # Final completion status
        status_queue.put({
            "status": "completed",
            "message": f"Image generated in {gen_time:.1f} seconds, {per_step_time:.1f} per iteration.",
            "progress": 100,
            "generation_time": gen_time,
            "per_step_time": per_step_time,
            "image_path": params.filepath
        })

    except Exception as e:
        logger.error(traceback.format_exc())
        logger.error(e)
        status_queue.put({
            "status": "error",
            "message": f"Generation failed: {str(e)}",
            "error": str(e),
            "progress": 0
        })
async def async_generate_image(pipe: Any, params: ImageRequest) -> AsyncGenerator[Dict[str, Any], None]:
    """
    Single async function that handles background Flux generation with status streaming
    """
    task_id = str(uuid.uuid4())
    status_queue = queue.Queue()
    worker_thread = None

    try:
        # Start background worker thread
        worker_thread = Thread(
            target=flux_worker,
            args=(pipe, params, status_queue, task_id),
            daemon=True
        )
        worker_thread.start()

        # Initial status
        yield {'status': 'starting', 'task_id': task_id, 'message': 'Initializing image generation'}

        # Stream status updates
        completed = False
        last_heartbeat = time.time()

        while not completed and worker_thread.is_alive():
            try:
                # Try to get a status update (non-blocking)
                status_update = status_queue.get_nowait()

                # Add task_id to the status update
                status_update['task_id'] = task_id

                # Send status update
                yield status_update

                # Check if completed
                if status_update.get('status') in ['completed', 'error']:
                    completed = True

                last_heartbeat = time.time()

            except queue.Empty:
                # No new status; send a heartbeat if needed
                current_time = time.time()
                if current_time - last_heartbeat > 2:  # Heartbeat every 2 seconds
                    heartbeat = {
                        'status': 'heartbeat',
                        'task_id': task_id,
                        'timestamp': current_time
                    }
                    yield heartbeat
                    last_heartbeat = current_time

            # Brief sleep to prevent busy waiting
            await asyncio.sleep(0.1)

        # Handle thread completion or timeout
        if not completed:
            if worker_thread.is_alive():
                # Thread still running, but we may have missed the completion signal
                timeout_status = {
                    'status': 'timeout',
                    'task_id': task_id,
                    'message': 'Generation timed out or connection lost'
                }
                yield timeout_status
            else:
                # Thread finished, but we may have missed the final status
                final_status = {
                    'status': 'completed',
                    'task_id': task_id,
                    'message': 'Generation completed'
                }
                yield final_status

    except Exception as e:
        error_status = {
            'status': 'error',
            'task_id': task_id,
            'message': f'Server error: {str(e)}',
            'error': str(e)
        }
        logger.error(error_status)
        yield error_status

    finally:
        # Cleanup: ensure thread completion
        if worker_thread is not None and worker_thread.is_alive():
            worker_thread.join(timeout=1.0)  # Wait up to 1 second for cleanup
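# Example (not part of this commit): a minimal sketch of draining the status
# stream, assuming `pipe` was obtained from model_cache.get_pipeline().
async def consume_example(pipe: Any, params: ImageRequest) -> None:
    async for update in async_generate_image(pipe, params):
        if update.get("status") != "heartbeat":  # heartbeats only keep the stream alive
            print(update["status"], update.get("message", ""))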
def status(chat_message: ChatMessage, status: str, progress: float = 0, estimated_time_remaining="...") -> ChatMessage:
    """Return a copy of the chat message updated as a streaming status."""
    # NOTE: progress and estimated_time_remaining are accepted for call-site
    # compatibility but are not currently embedded in the message content.
    message = chat_message.model_copy(deep=True)
    message.id = str(uuid.uuid4())
    message.timestamp = datetime.now(UTC)
    message.type = ChatMessageType.THINKING
    message.status = ChatStatusType.STREAMING
    message.content = status
    return message
async def generate_image(user_message: ChatMessage, request: ImageRequest) -> AsyncGenerator[ChatMessage, None]:
    """Generate an image with specified dimensions and yield status updates with time estimates."""
    chat_message = ChatMessage(
        session_id=user_message.session_id,
        tunables=user_message.tunables,
        status=ChatStatusType.INITIALIZING,
        type=ChatMessageType.PREPARING,
        sender=ChatSenderType.ASSISTANT,
        content="",
        timestamp=datetime.now(UTC)
    )
    try:
        # Validate prompt
        prompt = user_message.content.strip()
        if not prompt:
            chat_message.status = ChatStatusType.ERROR
            chat_message.content = "Prompt cannot be empty"
            yield chat_message
            return

        # Validate dimensions
        if request.height <= 0 or request.width <= 0:
            chat_message.status = ChatStatusType.ERROR
            chat_message.content = "Height and width must be positive"
            yield chat_message
            return

        filedir = os.path.dirname(request.filepath)
        filename = os.path.basename(request.filepath)
        os.makedirs(filedir, exist_ok=True)

        model_type = "flux"
        device = "cpu"

        yield status(chat_message, "Starting image generation...")

        # Get initial time estimate, scaled by resolution
        estimates = TIME_ESTIMATES[model_type][device]
        resolution_scale = (request.height * request.width) / (512 * 512)
        estimated_total = estimates["load"] + estimates["per_step"] * request.iterations * resolution_scale
        yield status(chat_message, f"Estimated generation time: ~{estimated_total:.1f} seconds for {request.width}x{request.height}")

        # Initialize or get cached pipeline
        start_time = time.time()
        yield status(chat_message, "Loading generative image model...")
        pipe = await model_cache.get_pipeline(request.model, device)
        load_time = time.time() - start_time
        yield status(chat_message, f"Model loaded in {load_time:.1f} seconds.", progress=10)

        async for status_message in async_generate_image(pipe, request):
            # Serialize the worker's status payload into the message content
            chat_message.content = json.dumps(status_message)
            chat_message.type = ChatMessageType.HEARTBEAT if status_message.get("status") == "heartbeat" else ChatMessageType.THINKING
            if chat_message.type != ChatMessageType.HEARTBEAT:
                logger.info(chat_message.content)
            yield chat_message

        # Final result
        total_time = time.time() - start_time
        chat_message.status = ChatStatusType.DONE
        chat_message.type = ChatMessageType.RESPONSE
        chat_message.content = json.dumps({
            "status": f"Image generation complete in {total_time:.1f} seconds",
            "progress": 100,
            "filename": request.filepath
        })
        yield chat_message

    except Exception as e:
        chat_message.status = ChatStatusType.ERROR
        chat_message.content = str(e)
        yield chat_message
        logger.error(traceback.format_exc())
        logger.error(chat_message.content)
        return
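# Example (not part of this commit): a minimal sketch of streaming these
# ChatMessage updates from a route as server-sent events; FastAPI is assumed,
# and the helper names here are hypothetical.
from fastapi.responses import StreamingResponse

def stream_image_generation(user_message: ChatMessage, request: ImageRequest) -> StreamingResponse:
    async def sse_body():
        async for msg in generate_image(user_message, request):
            yield f"data: {msg.model_dump_json()}\n\n"
    return StreamingResponse(sse_body(), media_type="text/event-stream")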
@ -238,10 +238,10 @@ async def get_current_user(
    """Get current user from database"""
    try:
        # Check candidates
        candidate = await database.get_candidate(user_id)
        if candidate:
        candidate_data = await database.get_candidate(user_id)
        if candidate_data:
            # logger.info(f"🔑 Current user is candidate: {candidate['id']}")
            return Candidate.model_validate(candidate)
            return Candidate.model_validate(candidate_data) if not candidate_data.get("is_AI") else CandidateAI.model_validate(candidate_data)

        # Check employers
        employer = await database.get_employer(user_id)
@ -667,15 +667,27 @@ async def create_candidate_ai(
        )

    persona_message = None
    resume_message = None
    state = 0  # 0 -- create persona, 1 -- create resume
    async for generated_message in generate_agent.generate(
        llm=llm_manager.get_llm(),
        model=defines.model,
        user_message=user_message,
        user=None,
    ):
        if generated_message.status == ChatStatusType.ERROR:
            logger.error(f"❌ AI generation error: {generated_message.content}")
            return JSONResponse(
                status_code=500,
                content=create_error_response("AI_GENERATION_ERROR", generated_message.content)
            )
        if generated_message.type == ChatMessageType.RESPONSE and state == 0:
            persona_message = generated_message
            state = 1  # Switch to resume generation
        elif generated_message.type == ChatMessageType.RESPONSE and state == 1:
            resume_message = generated_message

    if not persona_message or persona_message.status != ChatStatusType.DONE:
    if not persona_message:
        logger.error(f"❌ AI generation failed: {persona_message.content if persona_message else 'No message generated'}")
        return JSONResponse(
            status_code=500,
@ -686,12 +698,12 @@ async def create_candidate_ai(
        current_time = datetime.now(timezone.utc)
        candidate_data = json.loads(persona_message.content)
        candidate_data.update({
            "userType": "candidate",
            "createdAt": current_time.isoformat(),
            "updatedAt": current_time.isoformat(),
            "user_type": "candidate",
            "created_at": current_time.isoformat(),
            "updated_at": current_time.isoformat(),
            "status": "active",  # Directly active for AI-generated candidates
            "isAdmin": False,  # Default to non-admin
            "isAI": True,  # Mark as AI-generated
            "is_admin": False,  # Default to non-admin
            "is_AI": True,  # Mark as AI-generated
        })
        candidate = CandidateAI.model_validate(candidate_data)
    except ValidationError as e:
@ -716,15 +728,66 @@ async def create_candidate_ai(
        )

    logger.info(f"🤖 AI-generated candidate {candidate.username} created with email {candidate.email}")
    candidate_data = candidate.model_dump()
    candidate_data = candidate.model_dump(by_alias=False, exclude_unset=False)
    # Store in database
    await database.set_candidate(candidate.id, candidate_data)

    logger.info(f"✅ AI-generated candidate created: {candidate_data['email']}")
    user_auth_data = {
        "id": candidate.id,
        "type": "candidate",
        "email": candidate.email,
        "username": candidate.username
    }

    await database.set_user(candidate.email, user_auth_data)
    await database.set_user(candidate.username, user_auth_data)
    await database.set_user_by_id(candidate.id, user_auth_data)

    document_content = None
    if resume_message:
        document_id = str(uuid.uuid4())
        document_type = DocumentType.MARKDOWN
        document_content = resume_message.content.encode('utf-8')
        document_filename = "resume.md"

        document_data = Document(
            id=document_id,
            filename=document_filename,
            originalName=document_filename,
            type=document_type,
            size=len(document_content),
            upload_date=datetime.now(UTC),
            include_in_RAG=True,
            owner_id=candidate.id
        )
        file_path = os.path.join(defines.user_dir, candidate.username, "rag-content", document_filename)
        # Ensure the directory exists
        rag_content_dir = pathlib.Path(defines.user_dir) / candidate.username / "rag-content"
        rag_content_dir.mkdir(parents=True, exist_ok=True)
        try:
            with open(file_path, "wb") as f:
                f.write(document_content)

            logger.info(f"📁 File saved to disk: {file_path}")

        except Exception as e:
            logger.error(f"❌ Failed to save file to disk: {e}")
            return JSONResponse(
                status_code=500,
                content=create_error_response("FILE_SAVE_ERROR", "Failed to save resume file to disk")
            )

        # Store document metadata in database
        await database.set_document(document_id, document_data.model_dump())
        await database.add_document_to_candidate(candidate.id, document_id)
        logger.info(f"📄 Document metadata saved for candidate {candidate.id}: {document_id}")

    logger.info(f"✅ AI-generated candidate created: {candidate_data['email']}, resume is {len(document_content) if document_content else 0} bytes")

    return create_success_response({
        "message": "AI-generated candidate created successfully",
        "candidate": candidate_data
        "candidate": candidate_data,
        "resume": document_content,
    })

    except Exception as e:
@ -2272,6 +2335,60 @@ async def post_candidate_vectors(
            content=create_error_response("FETCH_ERROR", str(e))
        )

@api_router.delete("/candidates/{candidate_id}")
async def delete_candidate(
    candidate_id: str = Path(...),
    admin_user = Depends(get_current_admin),
    database: RedisDatabase = Depends(get_database)
):
    """Delete a candidate"""
    try:
        # Check if admin user
        if not admin_user.is_admin:
            logger.warning(f"⚠️ Unauthorized delete attempt by user {admin_user.id}")
            return JSONResponse(
                status_code=403,
                content=create_error_response("FORBIDDEN", "Only admins can delete candidates")
            )

        # Get candidate data
        candidate_data = await database.get_candidate(candidate_id)
        if not candidate_data:
            logger.warning(f"⚠️ Candidate not found for deletion: {candidate_id}")
            return JSONResponse(
                status_code=404,
                content=create_error_response("NOT_FOUND", "Candidate not found")
            )

        await entities.entity_manager.remove_entity(candidate_id)

        # Delete candidate from database
        await database.delete_candidate(candidate_id)

        # Optionally delete files and documents associated with the candidate
        await database.delete_all_candidate_documents(candidate_id)

        file_path = os.path.join(defines.user_dir, candidate_data["username"])
        if os.path.exists(file_path):
            try:
                shutil.rmtree(file_path)
                logger.info(f"🗑️ Deleted candidate files directory: {file_path}")
            except Exception as e:
                logger.error(f"❌ Failed to delete candidate files directory: {e}")

        logger.info(f"🗑️ Candidate deleted: {candidate_id} by admin {admin_user.id}")

        return create_success_response({
            "message": "Candidate deleted successfully",
            "candidateId": candidate_id
        })

    except Exception as e:
        logger.error(f"❌ Delete candidate error: {e}")
        return JSONResponse(
            status_code=500,
            content=create_error_response("DELETE_ERROR", "Failed to delete candidate")
        )
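# Example (not part of this commit): a minimal sketch of exercising the new
# DELETE route with httpx; the base URL, token, and candidate id are
# hypothetical, and the endpoint requires an admin bearer token.
import asyncio
import httpx

async def delete_example() -> None:
    async with httpx.AsyncClient(base_url="http://localhost:8000/api") as client:
        resp = await client.delete(
            "/candidates/123e4567-e89b-12d3-a456-426614174000",
            headers={"Authorization": "Bearer <admin-token>"},
        )
        print(resp.status_code, resp.json())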
@api_router.patch("/candidates/{candidate_id}")
async def update_candidate(
    candidate_id: str = Path(...),
@ -2283,15 +2400,16 @@ async def update_candidate(
    try:
        candidate_data = await database.get_candidate(candidate_id)
        if not candidate_data:
            logger.warning(f"⚠️ Candidate not found for update: {candidate_id}")
            return JSONResponse(
                status_code=404,
                content=create_error_response("NOT_FOUND", "Candidate not found")
            )

        candidate = Candidate.model_validate(candidate_data)
        candidate = Candidate.model_validate(candidate_data) if not candidate_data.get("is_AI") else CandidateAI.model_validate(candidate_data)

        # Check authorization (admins may update anyone; users only their own profile)
        if candidate.id != current_user.id:
        if current_user.is_admin is False and candidate.id != current_user.id:
            logger.warning(f"⚠️ Unauthorized update attempt by user {current_user.id} on candidate {candidate_id}")
            return JSONResponse(
                status_code=403,
@ -2300,9 +2418,9 @@ async def update_candidate(
        # Apply updates
        updates["updatedAt"] = datetime.now(UTC).isoformat()
        logger.info(f"🔄 Updating candidate {candidate_id} with data: {updates}")
        candidate_dict = candidate.model_dump()
        candidate_dict.update(updates)

        updated_candidate = Candidate.model_validate(candidate_dict)
        await database.set_candidate(candidate_id, updated_candidate.model_dump())
@ -2333,7 +2451,7 @@ async def get_candidates(

        # Get all candidates from Redis
        all_candidates_data = await database.get_all_candidates()
        candidates_list = [Candidate.model_validate(data) for data in all_candidates_data.values()]
        candidates_list = [Candidate.model_validate(data) if not data.get("is_AI") else CandidateAI.model_validate(data) for data in all_candidates_data.values()]

        paginated_candidates, total = filter_and_paginate(
            candidates_list, page, limit, sortBy, sortOrder, filter_dict
@ -2723,7 +2841,9 @@ async def get_candidate(
            content=create_error_response("NOT_FOUND", "Candidate not found")
        )

        candidate = Candidate.model_validate(candidates_list[0])
        candidate_data = candidates_list[0]
        candidate = Candidate.model_validate(candidate_data) if not candidate_data.get("is_AI") else CandidateAI.model_validate(candidate_data)

        return create_success_response(candidate.model_dump(by_alias=True, exclude_unset=True))

    except Exception as e:
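# Example (not part of this commit): the is_AI dispatch above now appears in
# several routes; a minimal sketch of factoring it into one hypothetical helper.
def validate_candidate(data: dict) -> Candidate:
    # Assumes CandidateAI subclasses Candidate, as the routes above imply.
    return CandidateAI.model_validate(data) if data.get("is_AI") else Candidate.model_validate(data)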
@ -79,6 +79,7 @@ class ChatMessageType(str, Enum):
    INFO = "info"
    PREPARING = "preparing"
    PROCESSING = "processing"
    HEARTBEAT = "heartbeat"
    RESPONSE = "response"
    SEARCHING = "searching"
    RAG_RESULT = "rag_result"
@ -90,6 +91,7 @@ class ChatMessageType(str, Enum):
class ChatStatusType(str, Enum):
    INITIALIZING = "initializing"
    STREAMING = "streaming"
    STATUS = "status"
    DONE = "done"
    ERROR = "error"

@ -101,6 +103,7 @@ class ChatContextType(str, Enum):
    GENERAL = "general"
    GENERATE_PERSONA = "generate_persona"
    GENERATE_PROFILE = "generate_profile"
    GENERATE_IMAGE = "generate_image"
    RAG_SEARCH = "rag_search"

class AIModelType(str, Enum):
@ -695,7 +698,6 @@ class ChromaDBGetResponse(BaseModel):

class ChatContext(BaseModel):
    type: ChatContextType
    requires_relationship: bool = Field(False, alias="requiresRelationship")
    related_entity_id: Optional[str] = Field(None, alias="relatedEntityId")
    related_entity_type: Optional[Literal["job", "candidate", "employer"]] = Field(None, alias="relatedEntityType")
    additional_context: Optional[Dict[str, Any]] = Field({}, alias="additionalContext")
@ -718,10 +720,10 @@ class ChatMessageBase(BaseModel):
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    session_id: str = Field(..., alias="sessionId")
    sender_id: Optional[str] = Field(None, alias="senderId")
    status: ChatStatusType
    type: ChatMessageType
    sender: ChatSenderType
    timestamp: datetime
    status: ChatStatusType = ChatStatusType.INITIALIZING
    type: ChatMessageType = ChatMessageType.PREPARING
    sender: ChatSenderType = ChatSenderType.SYSTEM
    timestamp: datetime = Field(default_factory=lambda: datetime.now(UTC), alias="timestamp")
    tunables: Optional[Tunables] = None
    content: str = ""
    model_config = {
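# Example (not part of this commit): with the new defaults, a status message
# needs only a session id -- a minimal sketch, assuming ChatMessage inherits
# these fields from ChatMessageBase.
msg = ChatMessage(session_id="session-123")  # hypothetical id
assert msg.status == ChatStatusType.INITIALIZING and msg.sender == ChatSenderType.SYSTEM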