Persona creation in progress
This commit is contained in:
parent
88a2b9dd90
commit
40d59042ef
@ -114,7 +114,7 @@ const CandidateRegistrationForm = () => {
|
||||
|
||||
setLoading(true);
|
||||
try {
|
||||
const result = await apiClient.createCandidateWithVerification({
|
||||
const result = await apiClient.createCandidate({
|
||||
email: formData.email,
|
||||
username: formData.username,
|
||||
password: formData.password,
|
||||
|
@ -562,29 +562,6 @@ function useAuthenticationLogic() {
|
||||
console.log('User data updated', updatedUser);
|
||||
}, []);
|
||||
|
||||
const createCandidateAccount = useCallback(async (candidateData: CreateCandidateRequest): Promise<boolean> => {
|
||||
setAuthState(prev => ({ ...prev, isLoading: true, error: null }));
|
||||
|
||||
try {
|
||||
const candidate = await apiClient.createCandidate(candidateData);
|
||||
console.log('Candidate created:', candidate);
|
||||
|
||||
// Store email for potential verification resend
|
||||
setPendingVerificationEmail(candidateData.email);
|
||||
|
||||
setAuthState(prev => ({ ...prev, isLoading: false }));
|
||||
return true;
|
||||
} catch (error) {
|
||||
const errorMessage = error instanceof Error ? error.message : 'Account creation failed';
|
||||
setAuthState(prev => ({
|
||||
...prev,
|
||||
isLoading: false,
|
||||
error: errorMessage
|
||||
}));
|
||||
return false;
|
||||
}
|
||||
}, [apiClient, setPendingVerificationEmail]);
|
||||
|
||||
const createEmployerAccount = useCallback(async (employerData: CreateEmployerRequest): Promise<boolean> => {
|
||||
setAuthState(prev => ({ ...prev, isLoading: true, error: null }));
|
||||
|
||||
@ -667,7 +644,6 @@ function useAuthenticationLogic() {
|
||||
resendEmailVerification,
|
||||
setPendingVerificationEmail,
|
||||
getPendingVerificationEmail,
|
||||
createCandidateAccount,
|
||||
createEmployerAccount,
|
||||
requestPasswordReset,
|
||||
refreshAuth,
|
||||
|
@ -10,7 +10,7 @@ import { useAuth } from 'hooks/AuthContext';
|
||||
import { useSelectedCandidate } from 'hooks/GlobalContext';
|
||||
|
||||
const CandidateListingPage = (props: BackstoryPageProps) => {
|
||||
const { apiClient } = useAuth();
|
||||
const { apiClient, user } = useAuth();
|
||||
const { selectedCandidate, setSelectedCandidate } = useSelectedCandidate();
|
||||
const navigate = useNavigate();
|
||||
const { setSnack } = props;
|
||||
@ -46,7 +46,8 @@ const CandidateListingPage = (props: BackstoryPageProps) => {
|
||||
|
||||
return (
|
||||
<Box sx={{display: "flex", flexDirection: "column"}}>
|
||||
<Box sx={{ p: 1, textAlign: "center" }}>
|
||||
{user?.isAdmin &&
|
||||
<Box sx={{ p: 1, textAlign: "center" }}>
|
||||
Not seeing a candidate you like?
|
||||
<Button
|
||||
variant="contained"
|
||||
@ -54,7 +55,8 @@ const CandidateListingPage = (props: BackstoryPageProps) => {
|
||||
onClick={() => { navigate('/generate-candidate') }}>
|
||||
Generate your own perfect AI candidate!
|
||||
</Button>
|
||||
</Box>
|
||||
</Box>
|
||||
}
|
||||
<Box sx={{ display: "flex", gap: 1, flexWrap: "wrap", justifyContent: "center" }}>
|
||||
{candidates?.map((u, i) =>
|
||||
<Box key={`${u.username}`}
|
||||
|
@ -20,6 +20,7 @@ import { Pulse } from 'components/Pulse';
|
||||
import { StreamingResponse } from 'services/api-client';
|
||||
import { ChatContext, ChatMessage, ChatMessageUser, ChatMessageBase, ChatSession, ChatQuery, Candidate, CandidateAI } from 'types/types';
|
||||
import { useAuth } from 'hooks/AuthContext';
|
||||
import { Message } from 'components/Message';
|
||||
|
||||
const emptyUser: CandidateAI = {
|
||||
userType: "candidate",
|
||||
@ -49,20 +50,26 @@ const emptyUser: CandidateAI = {
|
||||
ragContentSize: 0
|
||||
};
|
||||
|
||||
const defaultMessage: ChatMessage = {
|
||||
type: "preparing", status: "done", sender: "system", sessionId: "", timestamp: new Date(), content: ""
|
||||
};
|
||||
|
||||
const GenerateCandidate = (props: BackstoryElementProps) => {
|
||||
const { apiClient } = useAuth();
|
||||
const { apiClient, user } = useAuth();
|
||||
const { setSnack, submitQuery } = props;
|
||||
const [streaming, setStreaming] = useState<string>('');
|
||||
const [streaming, setStreaming] = useState<boolean>(false);
|
||||
const [streamingMessage, setStreamingMessage] = useState<ChatMessage | null>(null);
|
||||
const [processingMessage, setProcessingMessage] = useState<ChatMessage | null>(null);
|
||||
const [processing, setProcessing] = useState<boolean>(false);
|
||||
const [user, setUser] = useState<CandidateAI | null>(null);
|
||||
const [generatedUser, setGeneratedUser] = useState<CandidateAI | null>(null);
|
||||
const [prompt, setPrompt] = useState<string>('');
|
||||
const [resume, setResume] = useState<string>('');
|
||||
const [canGenImage, setCanGenImage] = useState<boolean>(false);
|
||||
const [status, setStatus] = useState<string>('');
|
||||
const [timestamp, setTimestamp] = useState<number>(0);
|
||||
const [state, setState] = useState<number>(0); // Replaced stateRef
|
||||
const [shouldGenerateProfile, setShouldGenerateProfile] = useState<boolean>(false);
|
||||
const [chatSession, setChatSession] = useState<ChatSession | null>(null);
|
||||
const [loading, setLoading] = useState<boolean>(false);
|
||||
|
||||
// Only keep refs that are truly necessary
|
||||
const controllerRef = useRef<StreamingResponse>(null);
|
||||
@ -70,15 +77,17 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
|
||||
|
||||
/* Create the chat session */
|
||||
useEffect(() => {
|
||||
if (chatSession) {
|
||||
if (chatSession || loading) {
|
||||
return;
|
||||
}
|
||||
|
||||
const createChatSession = async () => {
|
||||
console.log('Creating chat session');
|
||||
try {
|
||||
const chatContext: ChatContext = { type: "generate_persona" };
|
||||
const response: ChatSession = await apiClient.createChatSession(chatContext);
|
||||
setChatSession(response);
|
||||
console.log(`Chat session created for generate_persona`, response);
|
||||
setSnack(`Chat session created for generate_persona: ${response.id}`);
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
@ -86,120 +95,36 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
|
||||
}
|
||||
};
|
||||
|
||||
createChatSession();
|
||||
}, [chatSession, setChatSession]);
|
||||
setLoading(true);
|
||||
createChatSession().then(() => { setLoading(false) });
|
||||
}, [chatSession, loading, setChatSession, setLoading, setSnack]);
|
||||
|
||||
const generatePersona = useCallback((query: ChatQuery) => {
|
||||
if (!chatSession || !chatSession.id) {
|
||||
return;
|
||||
}
|
||||
const sessionId: string = chatSession.id;
|
||||
|
||||
setPrompt(query.prompt || '');
|
||||
setState(0);
|
||||
setStatus("Generating persona...");
|
||||
setUser(emptyUser);
|
||||
setStreaming('');
|
||||
setResume('');
|
||||
setProcessing(true);
|
||||
setCanGenImage(false);
|
||||
setShouldGenerateProfile(false); // Reset the flag
|
||||
|
||||
const chatMessage: ChatMessageUser = {
|
||||
sessionId: chatSession.id,
|
||||
content: query.prompt,
|
||||
tunables: query.tunables,
|
||||
const generatePersona = async (prompt: string) => {
|
||||
const userMessage: ChatMessageUser = {
|
||||
content: prompt,
|
||||
sessionId: "",
|
||||
sender: "user",
|
||||
status: "done",
|
||||
type: "user",
|
||||
sender: "user",
|
||||
timestamp: new Date()
|
||||
};
|
||||
|
||||
const streamResponse = apiClient.sendMessageStream(chatMessage, {
|
||||
onMessage: (chatMessage: ChatMessage) => {
|
||||
console.log('Message:', chatMessage);
|
||||
// Update UI with partial content
|
||||
},
|
||||
onStatusChange: (status) => {
|
||||
console.log('Status changed:', status);
|
||||
// Update UI status indicator
|
||||
},
|
||||
onComplete: () => {
|
||||
console.log('Content complete');
|
||||
},
|
||||
onWarn: (warning) => {
|
||||
console.log("Warning:", warning);
|
||||
},
|
||||
onError: (error: string | ChatMessageBase) => {
|
||||
// Type-guard to determine if this is a ChatMessageBase or a string
|
||||
if (typeof error === "object" && error !== null && "content" in error) {
|
||||
console.log("Error message:", error);
|
||||
} else {
|
||||
console.log("Error string:", error);
|
||||
}
|
||||
},
|
||||
onStreaming: (chunk) => {
|
||||
console.log("Streaming: ", chunk);
|
||||
}
|
||||
});
|
||||
// controllerRef.current = streamQueryResponse({
|
||||
// query,
|
||||
// type: "persona",
|
||||
// connectionBase,
|
||||
// onComplete: (msg) => {
|
||||
// switch (msg.status) {
|
||||
// case "partial":
|
||||
// case "done":
|
||||
// setState(currentState => {
|
||||
// switch (currentState) {
|
||||
// case 0: /* Generating persona */
|
||||
// let partialUser = JSON.parse(jsonrepair((msg.response || '').trim()));
|
||||
// if (!partialUser.fullName) {
|
||||
// partialUser.fullName = `${partialUser.firstName} ${partialUser.lastName}`;
|
||||
// }
|
||||
// console.log("Setting final user data:", partialUser);
|
||||
// setUser({ ...partialUser });
|
||||
// return 1; /* Generating resume */
|
||||
// case 1: /* Generating resume */
|
||||
// setResume(msg.response || '');
|
||||
// return 2; /* RAG generation */
|
||||
// case 2: /* RAG generation */
|
||||
// return 3; /* Image generation */
|
||||
// default:
|
||||
// return currentState;
|
||||
// }
|
||||
// });
|
||||
|
||||
// if (msg.status === "done") {
|
||||
// setProcessing(false);
|
||||
// setCanGenImage(true);
|
||||
// setStatus('');
|
||||
// controllerRef.current = null;
|
||||
// setState(0);
|
||||
// // Set flag to trigger profile generation after user state updates
|
||||
// console.log("Persona generation complete, setting shouldGenerateProfile flag");
|
||||
// setShouldGenerateProfile(true);
|
||||
// }
|
||||
// break;
|
||||
// case "thinking":
|
||||
// setStatus(msg.response || '');
|
||||
// break;
|
||||
|
||||
// case "error":
|
||||
// console.log(`Error generating persona: ${msg.response}`);
|
||||
// setSnack(msg.response || "", "error");
|
||||
// setProcessing(false);
|
||||
// setUser(emptyUser);
|
||||
// controllerRef.current = null;
|
||||
// setState(0);
|
||||
// break;
|
||||
// }
|
||||
// },
|
||||
// onStreaming: (chunk) => {
|
||||
// setStreaming(chunk);
|
||||
// }
|
||||
// });
|
||||
}, [setSnack]);
|
||||
setPrompt(prompt || '');
|
||||
setProcessing(true);
|
||||
setProcessingMessage({ ...defaultMessage, content: "Generating persona..." });
|
||||
try {
|
||||
const result = await apiClient.createCandidateAI(userMessage);
|
||||
console.log(result.message);
|
||||
setGeneratedUser(result.candidate);
|
||||
setCanGenImage(true);
|
||||
setShouldGenerateProfile(true); // Reset the flag
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
setPrompt('');
|
||||
setProcessing(false);
|
||||
setProcessingMessage(null);
|
||||
setSnack("Unable to generate AI persona", "error");
|
||||
}
|
||||
};
|
||||
|
||||
const cancelQuery = useCallback(() => {
|
||||
if (controllerRef.current) {
|
||||
@ -214,35 +139,32 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
|
||||
if (processing) {
|
||||
return;
|
||||
}
|
||||
const query: ChatQuery = {
|
||||
prompt: value,
|
||||
}
|
||||
generatePersona(query);
|
||||
generatePersona(value);
|
||||
}, [processing, generatePersona]);
|
||||
|
||||
const handleSendClick = useCallback(() => {
|
||||
const value = (backstoryTextRef.current && backstoryTextRef.current.getAndResetValue()) || "";
|
||||
generatePersona({ prompt: value });
|
||||
}, [generatePersona]);
|
||||
onEnter(value);
|
||||
}, [onEnter]);
|
||||
|
||||
// Effect to trigger profile generation when user data is ready
|
||||
useEffect(() => {
|
||||
console.log("useEffect triggered - shouldGenerateProfile:", shouldGenerateProfile, "user:", user?.username, user?.firstName);
|
||||
if (shouldGenerateProfile && user?.username !== "[blank]" && user?.firstName !== "[blank]") {
|
||||
console.log("Triggering profile generation with updated user data:", user);
|
||||
console.log("useEffect triggered - shouldGenerateProfile:", shouldGenerateProfile, "user:", generatedUser?.username, generatedUser?.firstName);
|
||||
if (shouldGenerateProfile && generatedUser?.username !== "[blank]" && generatedUser?.firstName !== "[blank]") {
|
||||
console.log("Triggering profile generation with updated user data:", generatedUser);
|
||||
if (controllerRef.current) {
|
||||
console.log("Controller already active, skipping profile generation");
|
||||
return;
|
||||
}
|
||||
|
||||
// Don't generate if we still have blank user data
|
||||
if (user?.username === "[blank]" || user?.firstName === "[blank]") {
|
||||
if (generatedUser?.username === "[blank]" || generatedUser?.firstName === "[blank]") {
|
||||
console.log("Cannot generate profile: user data not ready");
|
||||
return;
|
||||
}
|
||||
|
||||
const imagePrompt = `A photorealistic profile picture of a ${user?.age} year old ${user?.gender?.toLocaleLowerCase()} ${user?.ethnicity?.toLocaleLowerCase()} person. ${prompt}`
|
||||
setStatus('Starting image generation...');
|
||||
const imagePrompt = `A photorealistic profile picture of a ${generatedUser?.age} year old ${generatedUser?.gender?.toLocaleLowerCase()} ${generatedUser?.ethnicity?.toLocaleLowerCase()} person. ${prompt}`
|
||||
setProcessingMessage({ ...defaultMessage, content: 'Starting image generation...' });
|
||||
setProcessing(true);
|
||||
setCanGenImage(false);
|
||||
setState(3);
|
||||
@ -252,7 +174,7 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
|
||||
// query: {
|
||||
// prompt: imagePrompt,
|
||||
// agentOptions: {
|
||||
// username: user?.username,
|
||||
// username: generatedUser?.username,
|
||||
// filename: "profile.png"
|
||||
// }
|
||||
// },
|
||||
@ -270,8 +192,8 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
|
||||
// setState(0);
|
||||
// setCanGenImage(true);
|
||||
// setShouldGenerateProfile(false);
|
||||
// setUser({
|
||||
// ...(user ? user : emptyUser),
|
||||
// setGeneratedUser({
|
||||
// ...(generatedUser ? generatedUser : emptyUser),
|
||||
// hasProfile: true
|
||||
// });
|
||||
// }
|
||||
@ -308,25 +230,27 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
|
||||
// }
|
||||
// });
|
||||
}
|
||||
}, [shouldGenerateProfile, user, prompt, setSnack]);
|
||||
}, [shouldGenerateProfile, generatedUser, prompt, setSnack]);
|
||||
|
||||
// Handle streaming updates based on current state
|
||||
useEffect(() => {
|
||||
if (streaming.trim().length === 0) {
|
||||
const content = streamingMessage?.content.trim();
|
||||
if (!content) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
try {
|
||||
switch (state) {
|
||||
case 0: /* Generating persona */
|
||||
const partialUser = {...emptyUser, ...JSON.parse(jsonrepair(`${streaming.trim()}...`))};
|
||||
const partialUser = { ...emptyUser, ...JSON.parse(jsonrepair(content)) };
|
||||
if (!partialUser.fullName) {
|
||||
partialUser.fullName = `${partialUser.firstName} ${partialUser.lastName}`;
|
||||
}
|
||||
setUser(partialUser);
|
||||
setGeneratedUser(partialUser);
|
||||
break;
|
||||
case 1: /* Generating resume */
|
||||
setResume(streaming);
|
||||
setResume(content);
|
||||
break;
|
||||
case 3: /* RAG streaming */
|
||||
break;
|
||||
@ -338,6 +262,12 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
|
||||
}
|
||||
}, [streaming, state]);
|
||||
|
||||
if (!user?.isAdmin) {
|
||||
return (<Box>You must be logged in as an admin to generate AI candidates.</Box>);
|
||||
}
|
||||
if (!chatSession) {
|
||||
return (<></>);
|
||||
}
|
||||
return (
|
||||
<Box className="GenerateCandidate" sx={{
|
||||
display: "flex",
|
||||
@ -346,8 +276,8 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
|
||||
gap: 1,
|
||||
maxWidth: { xs: '100%', md: '700px', lg: '1024px' },
|
||||
}}>
|
||||
{user && <CandidateInfo
|
||||
candidate={user}
|
||||
{generatedUser && <CandidateInfo
|
||||
candidate={generatedUser}
|
||||
sx={{flexShrink: 1}}/>
|
||||
}
|
||||
{ prompt &&
|
||||
@ -361,16 +291,13 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
|
||||
justifyContent: "center",
|
||||
m: 2,
|
||||
}}>
|
||||
{ status && <Box sx={{ display: "flex", flexDirection: "column"}}>
|
||||
<Box sx={{ fontSize: "0.5rem"}}>Generation status</Box>
|
||||
<Box sx={{ fontWeight: "bold"}}>{status}</Box>
|
||||
</Box>}
|
||||
<PropagateLoader
|
||||
size="10px"
|
||||
loading={processing}
|
||||
aria-label="Loading Spinner"
|
||||
data-testid="loader"
|
||||
/>
|
||||
{processingMessage && <Message message={processingMessage} {...{ chatSession, submitQuery, setSnack }} />}
|
||||
<PropagateLoader
|
||||
size="10px"
|
||||
loading={processing}
|
||||
aria-label="Loading Spinner"
|
||||
data-testid="loader"
|
||||
/>
|
||||
</Box>
|
||||
}
|
||||
<Box sx={{display: "flex", flexDirection: "column"}}>
|
||||
@ -381,8 +308,8 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
|
||||
}}>
|
||||
<Box sx={{ display: "flex", position: "relative", width: "min-content", height: "min-content" }}>
|
||||
<Avatar
|
||||
src={user?.profileImage ? `/api/1.0/candidates/profile/${user.username}` : ''}
|
||||
alt={`${user?.fullName}'s profile`}
|
||||
src={generatedUser?.profileImage ? `/api/1.0/candidates/profile/${generatedUser.username}` : ''}
|
||||
alt={`${generatedUser?.fullName}'s profile`}
|
||||
sx={{
|
||||
width: 80,
|
||||
height: 80,
|
||||
@ -392,7 +319,7 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
|
||||
{processing && <Pulse sx={{ position: "relative", left: "-80px", top: "0px", mr: "-80px" }} timestamp={timestamp} />}
|
||||
</Box>
|
||||
|
||||
<Tooltip title={`${user?.profileImage ? 'Re-' : ''}Generate Picture`}>
|
||||
<Tooltip title={`${generatedUser?.profileImage ? 'Re-' : ''}Generate Picture`}>
|
||||
<span style={{ display: "flex", flexGrow: 1 }}>
|
||||
<Button
|
||||
sx={{ m: 1, gap: 1, justifySelf: "flex-start", alignSelf: "center", flexGrow: 0, maxHeight: "min-content" }}
|
||||
@ -401,7 +328,7 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
|
||||
processing || !canGenImage
|
||||
}
|
||||
onClick={() => { setShouldGenerateProfile(true); }}>
|
||||
{user?.profileImage ? 'Re-' : ''}Generate Picture<SendIcon />
|
||||
{generatedUser?.profileImage ? 'Re-' : ''}Generate Picture<SendIcon />
|
||||
</Button>
|
||||
</span>
|
||||
</Tooltip>
|
||||
|
@ -13,27 +13,26 @@ import {
|
||||
parseApiResponse,
|
||||
parsePaginatedResponse,
|
||||
handleApiResponse,
|
||||
handlePaginatedApiResponse,
|
||||
// handlePaginatedApiResponse,
|
||||
createPaginatedRequest,
|
||||
toUrlParams,
|
||||
extractApiData,
|
||||
ApiResponse,
|
||||
// ApiResponse,
|
||||
PaginatedResponse,
|
||||
PaginatedRequest
|
||||
} from 'types/conversion';
|
||||
|
||||
// Import generated date conversion functions
|
||||
import {
|
||||
convertCandidateFromApi,
|
||||
convertEmployerFromApi,
|
||||
convertJobFromApi,
|
||||
convertJobApplicationFromApi,
|
||||
convertChatSessionFromApi,
|
||||
// convertCandidateFromApi,
|
||||
// convertEmployerFromApi,
|
||||
// convertJobFromApi,
|
||||
// convertJobApplicationFromApi,
|
||||
// convertChatSessionFromApi,
|
||||
convertChatMessageFromApi,
|
||||
convertFromApi,
|
||||
convertArrayFromApi
|
||||
} from 'types/types';
|
||||
import internal from 'stream';
|
||||
|
||||
// ============================
|
||||
// Streaming Types and Interfaces
|
||||
@ -55,26 +54,11 @@ interface StreamingResponse {
|
||||
promise: Promise<Types.ChatMessage[]>;
|
||||
}
|
||||
|
||||
export interface CreateCandidateRequest {
|
||||
email: string;
|
||||
username: string;
|
||||
password: string;
|
||||
firstName: string;
|
||||
lastName: string;
|
||||
phone?: string;
|
||||
}
|
||||
|
||||
export interface CreateEmployerRequest {
|
||||
email: string;
|
||||
username: string;
|
||||
password: string;
|
||||
companyName: string;
|
||||
industry: string;
|
||||
companySize: string;
|
||||
companyDescription: string;
|
||||
websiteUrl?: string;
|
||||
phone?: string;
|
||||
}
|
||||
interface CreateCandidateAIResponse {
|
||||
message: string;
|
||||
candidate: Types.CandidateAI;
|
||||
};
|
||||
|
||||
|
||||
export interface PasswordResetRequest {
|
||||
email: string;
|
||||
@ -201,8 +185,8 @@ class ApiClient {
|
||||
/**
|
||||
* Create candidate with email verification
|
||||
*/
|
||||
async createCandidateWithVerification(
|
||||
candidate: CreateCandidateWithVerificationRequest
|
||||
async createCandidate(
|
||||
candidate: CreateCandidateRequest
|
||||
): Promise<RegistrationResponse> {
|
||||
const response = await fetch(`${this.baseUrl}/candidates`, {
|
||||
method: 'POST',
|
||||
@ -213,11 +197,27 @@ class ApiClient {
|
||||
return handleApiResponse<RegistrationResponse>(response);
|
||||
}
|
||||
|
||||
async createCandidateAI(
|
||||
userMessage: Types.ChatMessageUser
|
||||
): Promise<CreateCandidateAIResponse> {
|
||||
const response = await fetch(`${this.baseUrl}/candidates/ai`, {
|
||||
method: 'POST',
|
||||
headers: this.defaultHeaders,
|
||||
body: JSON.stringify(formatApiRequest(userMessage))
|
||||
});
|
||||
|
||||
const result = await handleApiResponse<CreateCandidateAIResponse>(response);
|
||||
return {
|
||||
message: result.message,
|
||||
candidate: convertFromApi<Types.CandidateAI>(result.candidate, "CandidateAI")
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Create employer with email verification
|
||||
*/
|
||||
async createEmployerWithVerification(
|
||||
employer: CreateEmployerWithVerificationRequest
|
||||
employer: CreateEmployerRequest
|
||||
): Promise<RegistrationResponse> {
|
||||
const response = await fetch(`${this.baseUrl}/employers`, {
|
||||
method: 'POST',
|
||||
@ -499,16 +499,6 @@ class ApiClient {
|
||||
// Candidate Methods with Date Conversion
|
||||
// ============================
|
||||
|
||||
async createCandidate(request: CreateCandidateRequest): Promise<Types.Candidate> {
|
||||
const response = await fetch(`${this.baseUrl}/candidates`, {
|
||||
method: 'POST',
|
||||
headers: this.defaultHeaders,
|
||||
body: JSON.stringify(formatApiRequest(request))
|
||||
});
|
||||
|
||||
return this.handleApiResponseWithConversion<Types.Candidate>(response, 'Candidate');
|
||||
}
|
||||
|
||||
async getCandidate(username: string): Promise<Types.Candidate> {
|
||||
const response = await fetch(`${this.baseUrl}/candidates/${username}`, {
|
||||
headers: this.defaultHeaders
|
||||
@ -1209,7 +1199,7 @@ class ApiClient {
|
||||
// Request/Response Types
|
||||
// ============================
|
||||
|
||||
export interface CreateCandidateWithVerificationRequest {
|
||||
export interface CreateCandidateRequest {
|
||||
email: string;
|
||||
username: string;
|
||||
password: string;
|
||||
@ -1218,7 +1208,7 @@ export interface CreateCandidateWithVerificationRequest {
|
||||
phone?: string;
|
||||
}
|
||||
|
||||
export interface CreateEmployerWithVerificationRequest {
|
||||
export interface CreateEmployerRequest {
|
||||
email: string;
|
||||
username: string;
|
||||
password: string;
|
||||
@ -1230,6 +1220,8 @@ export interface CreateEmployerWithVerificationRequest {
|
||||
phone?: string;
|
||||
}
|
||||
|
||||
|
||||
|
||||
export interface EmailVerificationRequest {
|
||||
token: string;
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
// Generated TypeScript types from Pydantic models
|
||||
// Source: src/backend/models.py
|
||||
// Generated on: 2025-06-03T04:01:05.747332
|
||||
// Generated on: 2025-06-03T15:05:33.759564
|
||||
// DO NOT EDIT MANUALLY - This file is auto-generated
|
||||
|
||||
// ============================
|
||||
|
@ -378,7 +378,7 @@ class Agent(BaseModel, ABC):
|
||||
return
|
||||
|
||||
async def generate(
|
||||
self, llm: Any, model: str, user_message: ChatMessageUser, user: Candidate, temperature=0.7
|
||||
self, llm: Any, model: str, user_message: ChatMessageUser, user: Candidate | None, temperature=0.7
|
||||
) -> AsyncGenerator[ChatMessage | ChatMessageBase, None]:
|
||||
logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
|
||||
|
||||
@ -443,7 +443,7 @@ Content: { content }
|
||||
for m in self.conversation
|
||||
])
|
||||
# Add the RAG context to the messages if available
|
||||
if rag_context:
|
||||
if rag_context and user:
|
||||
messages.append(
|
||||
LLMMessage(
|
||||
role="user",
|
||||
|
@ -51,7 +51,6 @@ Use that spelling instead of any spelling you may find in the <|context|>.
|
||||
|
||||
async for message in super().generate(llm, model, user_message, user, temperature):
|
||||
yield message
|
||||
|
||||
|
||||
# Register the base agent
|
||||
agent_registry.register(CandidateChat._agent_type, CandidateChat)
|
||||
|
609
src/backend/agents/generate_persona.py
Normal file
609
src/backend/agents/generate_persona.py
Normal file
@ -0,0 +1,609 @@
|
||||
from __future__ import annotations
|
||||
from datetime import UTC, datetime
|
||||
from pydantic import model_validator, Field, BaseModel # type: ignore
|
||||
from typing import (
|
||||
Dict,
|
||||
Literal,
|
||||
ClassVar,
|
||||
cast,
|
||||
Any,
|
||||
Tuple,
|
||||
AsyncGenerator,
|
||||
List,
|
||||
Optional
|
||||
# override
|
||||
) # NOTE: You must import Optional for late binding to work
|
||||
import inspect
|
||||
import random
|
||||
import re
|
||||
import json
|
||||
import traceback
|
||||
import asyncio
|
||||
import time
|
||||
import asyncio
|
||||
import time
|
||||
import os
|
||||
import random
|
||||
from names_dataset import NameDataset, NameWrapper # type: ignore
|
||||
|
||||
from .base import Agent, agent_registry, LLMMessage
|
||||
from models import Candidate, ChatMessage, ChatMessageBase, ChatMessageMetaData, ChatMessageType, ChatMessageUser, ChatOptions, ChatSenderType, ChatStatusType
|
||||
import model_cast
|
||||
from logger import logger
|
||||
import defines
|
||||
|
||||
seed = int(time.time())
|
||||
random.seed(seed)
|
||||
|
||||
emptyUser = {
|
||||
"profile_url": "",
|
||||
"description": "",
|
||||
"rag_content_size": 0,
|
||||
"username": "",
|
||||
"first_name": "",
|
||||
"last_name": "",
|
||||
"full_name": "",
|
||||
"email": "",
|
||||
"phone": "",
|
||||
"title": "",
|
||||
"contact_info": {},
|
||||
"questions": [],
|
||||
}
|
||||
|
||||
generate_persona_system_prompt = """\
|
||||
You are a casting director for a movie. Your job is to provide information on ficticious personas for use in a screen play.
|
||||
|
||||
All response field MUST BE IN ENGLISH, regardless of ethnicity.
|
||||
|
||||
You will be provided with defaults to use if not specified by the user:
|
||||
|
||||
```json
|
||||
{
|
||||
"age": number,
|
||||
"gender": "male" | "female",
|
||||
"ethnicity": string,
|
||||
"full_name": string,
|
||||
"first_name": string,
|
||||
"last_name": string,
|
||||
}
|
||||
```
|
||||
|
||||
Additional information provided in the user message can override those defaults.
|
||||
|
||||
You need to randomly assign an English username (can include numbers), a first name, last name, and a two English sentence description of that individual's work given the demographics provided.
|
||||
|
||||
Your response must be in JSON.
|
||||
Provide only the JSON response, and match the field names EXACTLY.
|
||||
Provide all information in English ONLY, with no other commentary:
|
||||
|
||||
```json
|
||||
{
|
||||
"username": string, # A likely-to-be unique username, no more than 15 characters (can include numbers and letters but no special characters)
|
||||
"description": string, # One to two sentence description of their job
|
||||
"location": string, # In the location, provide ALL of: City, State/Region, and Country
|
||||
"phone": string, # Location appropriate phone number with area code
|
||||
"email": string, # primary email address
|
||||
"title": string, # Job title of their current job
|
||||
}
|
||||
```
|
||||
|
||||
Make sure to provide a username and that the field name for the job description is "description".
|
||||
|
||||
DO NOT infer, imply, abbreviate, or state the ethnicity or age in the username or description. You are providing those only for use later by the system when casting individuals for the role.
|
||||
"""
|
||||
|
||||
generate_resume_system_prompt = """
|
||||
You are a creative writing casting director. As part of the casting, you are building backstories about individuals. The first part
|
||||
of that is to create an in-depth resume for the person. You will be provided with the following information:
|
||||
|
||||
```json
|
||||
"full_name": string, # Person full name
|
||||
"location": string, # Location of residence
|
||||
"age": number, # Age of candidate
|
||||
"description": string # A brief description of the person
|
||||
```
|
||||
|
||||
Use that information to invent a full career resume. Include sections such as:
|
||||
|
||||
* Contact information
|
||||
* Job goal
|
||||
* Top skills
|
||||
* Detailed work history. If they are under the age of 25, you might include skills, hobbies, or volunteering they may have done while an adolescent
|
||||
* In the work history, provide company names, years of employment, and their role
|
||||
* Education
|
||||
|
||||
Provide the resume in Markdown format. DO NOT provide any commentary before or after the resume.
|
||||
"""
|
||||
|
||||
# Debug version to identify the issue
|
||||
import random
|
||||
import logging
|
||||
|
||||
# Set up logging to see what's happening
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class EthnicNameGenerator:
|
||||
def __init__(self):
|
||||
try:
|
||||
from names_dataset import NameDataset # type: ignore
|
||||
self.nd = NameDataset()
|
||||
except ImportError:
|
||||
logger.error("NameDataset not available. Please install: pip install names-dataset")
|
||||
self.nd = None
|
||||
except Exception as e:
|
||||
logger.error(f"Error initializing NameDataset: {e}")
|
||||
self.nd = None
|
||||
|
||||
# US Census 2020 approximate ethnic distribution
|
||||
self.ethnic_weights = {
|
||||
'White': 0.576,
|
||||
'Hispanic': 0.186,
|
||||
'Black': 0.134,
|
||||
'Asian': 0.062,
|
||||
'Native American': 0.013,
|
||||
'Pacific Islander': 0.003,
|
||||
'Mixed/Other': 0.026
|
||||
}
|
||||
|
||||
# Map ethnicities to countries (using alpha-2 codes that NameDataset uses)
|
||||
self.ethnic_country_mapping = {
|
||||
'White': ['US', 'GB', 'DE', 'IE', 'IT', 'PL', 'FR', 'CA', 'AU'],
|
||||
'Hispanic': ['MX', 'ES', 'CO', 'PE', 'AR', 'CU', 'VE', 'CL'],
|
||||
'Black': ['US'], # African American names
|
||||
'Asian': ['CN', 'IN', 'PH', 'VN', 'KR', 'JP', 'TH', 'MY'],
|
||||
'Native American': ['US'],
|
||||
'Pacific Islander': ['US'],
|
||||
'Mixed/Other': ['US']
|
||||
}
|
||||
|
||||
def get_weighted_ethnicity(self) -> str:
|
||||
"""Select ethnicity based on US demographic weights"""
|
||||
ethnicities = list(self.ethnic_weights.keys())
|
||||
weights = list(self.ethnic_weights.values())
|
||||
return random.choices(ethnicities, weights=weights)[0]
|
||||
|
||||
def get_names_by_criteria(self, countries: List[str], gender: Optional[str] = None,
|
||||
n: int = 50, use_first_names: bool = True) -> List[str]:
|
||||
"""Get names matching criteria using NameDataset's get_top_names method"""
|
||||
if not self.nd:
|
||||
return []
|
||||
|
||||
all_names = []
|
||||
for country_code in countries:
|
||||
try:
|
||||
# Get top names for this country
|
||||
top_names = self.nd.get_top_names(
|
||||
n=n,
|
||||
use_first_names=use_first_names,
|
||||
country_alpha2=country_code,
|
||||
gender=gender
|
||||
)
|
||||
|
||||
if country_code in top_names:
|
||||
if use_first_names and gender:
|
||||
# For first names with gender specified
|
||||
gender_key = 'M' if gender.upper() in ['M', 'MALE'] else 'F'
|
||||
if gender_key in top_names[country_code]:
|
||||
all_names.extend(top_names[country_code][gender_key])
|
||||
elif use_first_names:
|
||||
# For first names without gender (get both)
|
||||
for gender_names in top_names[country_code].values():
|
||||
all_names.extend(gender_names)
|
||||
else:
|
||||
# For last names
|
||||
all_names.extend(top_names[country_code])
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error getting names for {country_code}: {e}")
|
||||
continue
|
||||
|
||||
return list(set(all_names)) # Remove duplicates
|
||||
|
||||
def get_name_by_ethnicity(self, ethnicity: str, gender: str = 'random') -> Tuple[str, str, str, str]:
    """Build a (first, last, ethnicity, gender) tuple for the given ethnicity.

    Pulls candidate names from the NameDataset-backed pools; when a pool is
    empty, falls back to a curated, culturally appropriate list.
    """
    if gender == 'random':
        gender = random.choice(['Male', 'Female'])

    countries = self.ethnic_country_mapping.get(ethnicity, ['US'])

    # Candidate pools drawn from the dataset for the mapped countries.
    first_pool = self.get_names_by_criteria(
        countries=countries,
        gender=gender,
        use_first_names=True
    )
    last_pool = self.get_names_by_criteria(
        countries=countries,
        use_first_names=False
    )

    if first_pool:
        first_name = random.choice(first_pool)
    else:
        first_name = self._get_fallback_first_name(gender, ethnicity)
        logger.info(f"Using fallback first name for {ethnicity} {gender}")

    if last_pool:
        last_name = random.choice(last_pool)
    else:
        last_name = self._get_fallback_last_name(ethnicity)
        logger.info(f"Using fallback last name for {ethnicity}")

    return first_name, last_name, ethnicity, gender
|
||||
|
||||
def _get_fallback_first_name(self, gender: str, ethnicity: str) -> str:
|
||||
"""Provide culturally appropriate fallback first names"""
|
||||
fallback_names = {
|
||||
'White': {
|
||||
'Male': ['James', 'Robert', 'John', 'Michael', 'William', 'David', 'Richard', 'Joseph'],
|
||||
'Female': ['Mary', 'Patricia', 'Jennifer', 'Linda', 'Elizabeth', 'Barbara', 'Susan', 'Jessica']
|
||||
},
|
||||
'Hispanic': {
|
||||
'Male': ['José', 'Luis', 'Miguel', 'Juan', 'Francisco', 'Alejandro', 'Antonio', 'Carlos'],
|
||||
'Female': ['María', 'Guadalupe', 'Juana', 'Margarita', 'Francisca', 'Teresa', 'Rosa', 'Ana']
|
||||
},
|
||||
'Black': {
|
||||
'Male': ['James', 'Robert', 'John', 'Michael', 'William', 'David', 'Richard', 'Charles'],
|
||||
'Female': ['Mary', 'Patricia', 'Linda', 'Elizabeth', 'Barbara', 'Susan', 'Jessica', 'Sarah']
|
||||
},
|
||||
'Asian': {
|
||||
'Male': ['Wei', 'Ming', 'Chen', 'Li', 'Kumar', 'Raj', 'Hiroshi', 'Takeshi'],
|
||||
'Female': ['Mei', 'Lin', 'Ling', 'Priya', 'Yuki', 'Soo', 'Hana', 'Anh']
|
||||
}
|
||||
}
|
||||
|
||||
ethnicity_names = fallback_names.get(ethnicity, fallback_names['White'])
|
||||
return random.choice(ethnicity_names.get(gender, ethnicity_names['Male']))
|
||||
|
||||
def _get_fallback_last_name(self, ethnicity: str) -> str:
|
||||
"""Provide culturally appropriate fallback last names"""
|
||||
fallback_surnames = {
|
||||
'White': ['Smith', 'Johnson', 'Williams', 'Brown', 'Jones', 'Miller', 'Wilson', 'Moore'],
|
||||
'Hispanic': ['García', 'Rodríguez', 'Martínez', 'López', 'González', 'Pérez', 'Sánchez', 'Ramírez'],
|
||||
'Black': ['Johnson', 'Williams', 'Brown', 'Jones', 'Davis', 'Miller', 'Wilson', 'Moore'],
|
||||
'Asian': ['Li', 'Wang', 'Zhang', 'Liu', 'Chen', 'Yang', 'Huang', 'Zhao']
|
||||
}
|
||||
|
||||
return random.choice(fallback_surnames.get(ethnicity, fallback_surnames['White']))
|
||||
|
||||
def generate_random_name(self, gender: str = 'random') -> Tuple[str, str, str, str]:
    """Generate one random (first, last, ethnicity, gender) tuple.

    The ethnicity is drawn according to US demographic weights.
    """
    return self.get_name_by_ethnicity(self.get_weighted_ethnicity(), gender)
|
||||
|
||||
def generate_multiple_names(self, count: int = 10, gender: str = 'random') -> List[Dict]:
    """Generate `count` random name records.

    Each record carries full/first/last name, ethnicity, and the concretely
    resolved gender (i.e. never 'random').
    """
    def _record() -> Dict:
        # One draw per record; the generator resolves 'random' to a gender.
        first, last, eth, resolved_gender = self.generate_random_name(gender)
        return {
            'full_name': f"{first} {last}",
            'first_name': first,
            'last_name': last,
            'ethnicity': eth,
            'gender': resolved_gender,
        }

    return [_record() for _ in range(count)]
|
||||
|
||||
class GeneratePersona(Agent):
    """Agent that fabricates a synthetic candidate persona.

    Demographics (age, gender, name, ethnicity) are randomized locally via
    EthnicNameGenerator in randomize(); the LLM then fills in the rest of
    the profile (see generate()).
    """
    agent_type: Literal["generate_persona"] = "generate_persona"  # type: ignore
    _agent_type: ClassVar[str] = agent_type  # Add this for registration
    agent_persist: bool = False

    system_prompt: str = generate_persona_system_prompt
    # Persona attributes, populated by randomize() / generate().
    age: int = 0
    gender: str = ""
    username: str = ""
    first_name: str = ""
    last_name: str = ""
    full_name: str = ""
    ethnicity: str = ""

    # NOTE(review): a single EthnicNameGenerator() is constructed at class
    # definition time and used as the field default — confirm pydantic gives
    # each instance its own copy (otherwise the generator is shared).
    generator: Any = Field(default=EthnicNameGenerator(), exclude=True)
    llm: Any = Field(default=None, exclude=True)
    # NOTE(review): declared as str but defaulted to None — presumably should
    # be Optional[str]; confirm before tightening validation.
    model: str = Field(default=None, exclude=True)
|
||||
|
||||
def randomize(self):
    """Assign a random age, name, ethnicity and gender to this persona."""
    self.age = random.randint(22, 67)
    first, last, eth, gender = self.generator.generate_random_name()
    self.first_name, self.last_name = first, last
    self.ethnicity, self.gender = eth, gender
    self.full_name = f"{first} {last}"
|
||||
|
||||
async def call_llm(self, llm: Any, model: str, user_message: ChatMessageUser, system_prompt: str, temperature=0.7):
    """Stream an LLM chat completion as a sequence of ChatMessage updates.

    Args:
        llm: LLM client exposing a streaming .chat(...) generator.
        model: Model identifier forwarded to llm.chat.
        user_message: Incoming user message; its session_id/tunables seed the reply.
        system_prompt: System prompt placed first in the message list.
        temperature: Sampling temperature (default 0.7).

    Yields:
        ChatMessage partials while streaming (status STREAMING), then one
        final message with status DONE — or a message with status ERROR if
        the LLM yields an empty response.
    """
    # Shell message that accumulates streamed content for this session.
    chat_message = ChatMessage(
        session_id=user_message.session_id,
        tunables=user_message.tunables,
        status=ChatStatusType.INITIALIZING,
        type=ChatMessageType.PREPARING,
        sender=ChatSenderType.ASSISTANT,
        content="",
        timestamp=datetime.now(UTC)
    )

    chat_message.metadata = ChatMessageMetaData()
    chat_message.metadata.options = ChatOptions(
        seed=8911,
        num_ctx=self.context_size,
        temperature=temperature, # Higher temperature to encourage tool usage
    )

    # System prompt first, then the user's content.
    messages: List[LLMMessage] = [
        LLMMessage(role="system", content=system_prompt),
        LLMMessage(role="user", content=user_message.content),
    ]

    # Reset the response for streaming
    chat_message.content = ""
    chat_message.type = ChatMessageType.GENERATING
    chat_message.status = ChatStatusType.STREAMING

    for response in llm.chat(
        model=model,
        messages=messages,
        options={
            **chat_message.metadata.options.model_dump(exclude_unset=True),
        },
        stream=True,
    ):
        if not response:
            # Empty frame from the client: surface an ERROR and stop.
            chat_message.status = ChatStatusType.ERROR
            chat_message.content = "No response from LLM."
            yield chat_message
            return

        chat_message.content += response.message.content

        if not response.done:
            # NOTE(review): chat_chunk is constructed here but chat_message
            # (the fully accumulated message) is what gets yielded, leaving
            # chat_chunk unused — confirm whether the chunk was meant to be
            # yielded instead.
            chat_chunk = model_cast.cast_to_model(ChatMessageBase, chat_message)
            chat_chunk.content = response.message.content
            yield chat_message
            continue

        if response.done:
            # Final frame: record token/timing metrics and mark the message done.
            self.collect_metrics(response)
            chat_message.metadata.eval_count += response.eval_count
            chat_message.metadata.eval_duration += response.eval_duration
            chat_message.metadata.prompt_eval_count += response.prompt_eval_count
            chat_message.metadata.prompt_eval_duration += response.prompt_eval_duration
            self.context_tokens = (
                response.prompt_eval_count + response.eval_count
            )
            chat_message.type = ChatMessageType.RESPONSE
            chat_message.status = ChatStatusType.DONE
            yield chat_message
|
||||
|
||||
async def generate(
    self, llm: Any, model: str, user_message: ChatMessageUser, user: Candidate, temperature=0.7
):
    """Generate a full persona: randomize demographics, then have the LLM flesh them out.

    Streams intermediate ChatMessages while the LLM runs; the final DONE
    message's content is the persona serialized as JSON. On parse failure an
    ERROR message is yielded instead.

    Raises:
        Exception: when the LLM reports an error mid-stream or returns nothing.
    """
    # Roll random demographics (age, name, ethnicity, gender) for this persona.
    self.randomize()

    original_prompt = user_message.content

    # Replace the user prompt with the randomized demographics as a fenced
    # JSON block; the system prompt instructs the LLM to build on these.
    user_message.content = f"""\
```json
{json.dumps({
    "age": self.age,
    "gender": self.gender,
    "ethnicity": self.ethnicity,
    "full_name": self.full_name,
    "first_name": self.first_name,
    "last_name": self.last_name,
})}
```
"""

    if original_prompt:
        user_message.content += f"""
Incorporate the following into the job description: {original_prompt}
"""

    #
    # Generate the persona
    #
    generating_message = None
    async for generating_message in self.call_llm(
        llm=llm, model=model,
        user_message=user_message,
        system_prompt=generate_persona_system_prompt,
        temperature=temperature,
    ):
        if generating_message.status == ChatStatusType.ERROR:
            logger.error(f"Error generating persona: {generating_message.content}")
            raise Exception(generating_message.content)

        # Forward streaming partials; the DONE message is post-processed below.
        if generating_message.status != ChatStatusType.DONE:
            yield generating_message

    if not generating_message:
        raise Exception("No response from LLM during persona generation")

    json_str = self.extract_json_from_text(generating_message.content)
    try:
        # Merge: LLM-provided keys override the locally randomized demographics.
        persona = {
            "age": self.age,
            "ethnicity": self.ethnicity,
            "gender": self.gender,
            "full_name": self.full_name,
            "first_name": self.first_name,
            "last_name": self.last_name,
        } | json.loads(json_str)
        self.username = persona.get("username", None)
        if not self.username:
            raise ValueError("LLM did not generate a username")
        # Collapse whitespace runs in the username to dots.
        self.username = re.sub(r'\s+', '.', self.username)
        # NOTE(review): the sanitized name lives on self.username, but the
        # uniqueness loop below keys off persona["username"], which was NOT
        # sanitized — confirm which value is authoritative.
        user_dir = os.path.join(defines.user_dir, persona["username"])
        while os.path.exists(user_dir):
            # Username already taken on disk: append or bump a numeric suffix.
            match = re.match(r"^(.*?)(\d*)$", persona["username"])
            if match:
                base = match.group(1)
                num = match.group(2)
                iteration = int(num) + 1 if num else 1
                persona["username"] = f"{base}{iteration}"
            user_dir = os.path.join(defines.user_dir, persona["username"])

        # Normalize string fields.
        for key in persona:
            if isinstance(persona[key], str):
                persona[key] = persona[key].strip()
        # Lower-case the enum-like fields.
        for to_lower in ["gender", "ethnicity"]:
            if to_lower in persona:
                persona[to_lower] = persona[to_lower].lower()
        # Convert CITY, STATE, COUNTRY to Location type
        if "location" in persona:
            location_parts = persona["location"].split(",")
            if len(location_parts) == 3:
                city, state, country = [part.strip() for part in location_parts]
                persona["location"] = {
                    "city": city,
                    "state": state,
                    "country": country
                }
            else:
                logger.error(f"Invalid location format: {persona['location']}")
                persona["location"] = None
        # Mark this persona as AI generated
        persona["is_ai"] = True
    except Exception as e:
        generating_message.content = f"Unable to parse LLM returned content: {json_str} {str(e)}"
        generating_message.status = ChatStatusType.ERROR
        logger.error(traceback.format_exc())
        logger.error(generating_message.content)
        yield generating_message
        return

    # Persona generated
    generating_message.content = json.dumps(persona)
    generating_message.status = ChatStatusType.DONE
    generating_message.type = ChatMessageType.RESPONSE

    yield generating_message
|
||||
|
||||
# #
|
||||
# # Generate the resume
|
||||
# #
|
||||
# message.status = "thinking"
|
||||
# message.response = f"Generating resume for {persona['full_name']}..."
|
||||
# yield message
|
||||
|
||||
# prompt = f"""
|
||||
# ```json
|
||||
# {{
|
||||
# "full_name": "{persona["full_name"]}",
|
||||
# "location": "{persona["location"]}",
|
||||
# "age": {persona["age"]},
|
||||
# "description": {persona["description"]},
|
||||
# "title": {persona["title"]},
|
||||
# "email": {persona["email"]},
|
||||
# "phone": {persona["phone"]}
|
||||
# }}
|
||||
# ```
|
||||
# """
|
||||
# if original_prompt:
|
||||
# prompt += f"""
|
||||
# Make sure at least one of the candidate's job descriptions take into account the following: {original_prompt}."""
|
||||
# try:
|
||||
# async for message in self.call_llm(
|
||||
# message=message, system_prompt=generate_resume_system_prompt, prompt=prompt
|
||||
# ):
|
||||
# if message.status != "done":
|
||||
# yield message
|
||||
# if message.status == "error":
|
||||
# raise Exception(message.response)
|
||||
|
||||
# except Exception as e:
|
||||
# message.response = f"Unable to parse LLM returned content: {json_str} {str(e)}"
|
||||
# message.status = "error"
|
||||
# logger.error(traceback.format_exc())
|
||||
# logger.error(message.response)
|
||||
# yield message
|
||||
# return
|
||||
|
||||
# resume = self.extract_markdown_from_text(message.response)
|
||||
# if resume:
|
||||
# user_resume_dir = os.path.join(defines.user_dir, persona["username"], defines.resume_doc_dir)
|
||||
# os.makedirs(user_resume_dir, exist_ok=True)
|
||||
# user_resume_file = os.path.join(user_resume_dir, defines.resume_doc)
|
||||
# with open(user_resume_file, "w") as f:
|
||||
# f.write(resume)
|
||||
|
||||
# # Resume generated
|
||||
# message.response = resume
|
||||
# message.status = "partial"
|
||||
# yield message
|
||||
|
||||
# #
|
||||
# # Generate RAG database
|
||||
# #
|
||||
# message.status = "thinking"
|
||||
# message.response = f"Generating RAG content from resume..."
|
||||
# yield message
|
||||
|
||||
# # Prior to instancing a new User, the json data has to be created
|
||||
# # so the system can process it
|
||||
# user_dir = os.path.join(defines.user_dir, persona["username"])
|
||||
# os.makedirs(user_dir, exist_ok=True)
|
||||
# user_info = os.path.join(user_dir, "info.json")
|
||||
# with open(user_info, "w") as f:
|
||||
# f.write(json.dumps(persona, indent=2))
|
||||
|
||||
# user = User(llm=self.llm, username=self.username)
|
||||
# await user.initialize()
|
||||
# await user.file_watcher.initialize_collection()
|
||||
# # RAG content generated
|
||||
# message.response = f"{user.file_watcher.collection.count()} entries created in RAG vector store."
|
||||
|
||||
# #
|
||||
# # Write out the completed user information
|
||||
# #
|
||||
# with open(user_info, "w") as f:
|
||||
# f.write(json.dumps(persona, indent=2))
|
||||
|
||||
# # Image generated
|
||||
# message.status = "done"
|
||||
# message.response = json.dumps(persona)
|
||||
|
||||
# except Exception as e:
|
||||
# message.status = "error"
|
||||
# logger.error(traceback.format_exc())
|
||||
# logger.error(message.response)
|
||||
# message.response = f"Error in persona generation: {str(e)}"
|
||||
# logger.error(message.response)
|
||||
# yield message
|
||||
# return
|
||||
|
||||
# # Done processing, add message to conversation
|
||||
# self.context.processing = False
|
||||
# # Return the final message
|
||||
# yield message
|
||||
# return
|
||||
|
||||
def extract_json_from_text(self, text: str) -> str:
    """Pull a JSON payload out of free-form LLM output.

    Prefers a fenced ```json block; otherwise grabs the widest {...} span.

    Raises:
        ValueError: when neither form is present.
    """
    fenced = re.search(r"```json\s*([\s\S]*?)\s*```", text)
    if fenced:
        return fenced.group(1).strip()

    # No code fence: fall back to the outermost brace-delimited span.
    bare = re.search(r"({[\s\S]*})", text)
    if bare:
        return bare.group(1).strip()

    raise ValueError("No JSON found in the response")
|
||||
|
||||
def extract_markdown_from_text(self, text: str) -> str:
    """Pull a Markdown payload out of a fenced ```md / ```markdown block.

    Raises:
        ValueError: when no such fenced block is present.
    """
    fenced = re.search(r"```(md|markdown)\s*([\s\S]*?)\s*```", text)
    if fenced:
        # Group 1 is the fence language tag; group 2 is the body.
        return fenced.group(2).strip()

    raise ValueError("No Markdown found in the response")
|
||||
|
||||
# Register the base agent
# Makes GeneratePersona discoverable via its agent_type key
# ("generate_persona") in the shared agent_registry at import time.
agent_registry.register(GeneratePersona._agent_type, GeneratePersona)
|
@ -812,8 +812,8 @@ class RedisDatabase:
|
||||
'''Delete a specific chat message from Redis'''
|
||||
try:
|
||||
# Remove from the session's message list
|
||||
await self.redis.lrem(f"chat_messages:{session_id}", 0, message_id)
|
||||
|
||||
key = f"{self.KEY_PREFIXES['chat_messages']}{session_id}"
|
||||
await self.redis.lrem(key, 0, message_id)
|
||||
# Delete the message data itself
|
||||
result = await self.redis.delete(f"chat_message:{message_id}")
|
||||
return result > 0
|
||||
@ -959,7 +959,7 @@ class RedisDatabase:
|
||||
|
||||
return archived_count
|
||||
|
||||
# Enhanced User Operations
|
||||
# User Operations
|
||||
async def get_user_by_username(self, username: str) -> Optional[Dict]:
|
||||
"""Get user by username specifically"""
|
||||
username_key = f"{self.KEY_PREFIXES['users']}{username.lower()}"
|
||||
|
@ -31,7 +31,7 @@ import uuid
|
||||
import logging
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from typing import Dict, Any, Optional
|
||||
from pydantic import BaseModel, EmailStr, field_validator # type: ignore
|
||||
from pydantic import BaseModel, EmailStr, field_validator, ValidationError # type: ignore
|
||||
# Prometheus
|
||||
from prometheus_client import Summary # type: ignore
|
||||
from prometheus_fastapi_instrumentator import Instrumentator # type: ignore
|
||||
@ -67,7 +67,7 @@ from models import (
|
||||
LoginRequest, CreateCandidateRequest, CreateEmployerRequest,
|
||||
|
||||
# User models
|
||||
Candidate, Employer, BaseUserWithType, BaseUser, Guest, Authentication, AuthResponse,
|
||||
Candidate, Employer, BaseUserWithType, BaseUser, Guest, Authentication, AuthResponse, CandidateAI,
|
||||
|
||||
# Job models
|
||||
Job, JobApplication, ApplicationStatus,
|
||||
@ -256,6 +256,20 @@ async def get_current_user(
|
||||
logger.error(f"❌ Error getting current user: {e}")
|
||||
raise HTTPException(status_code=404, detail="User not found")
|
||||
|
||||
async def get_current_admin(
    user_id: str = Depends(verify_token_with_blacklist),
    database: RedisDatabase = Depends(lambda: db_manager.get_database())
) -> BaseUserWithType:
    """FastAPI dependency: resolve the authenticated user and require admin rights.

    Returns the user when it is a Candidate or Employer with is_admin set.

    Raises:
        HTTPException: 403 when the user is not an admin (404 may also
        propagate from get_current_user when the user does not exist).
    """
    user = await get_current_user(user_id=user_id, database=database)
    # Only Candidate and Employer account types carry the is_admin flag.
    if isinstance(user, Candidate) and user.is_admin:
        return user
    elif isinstance(user, Employer) and user.is_admin:
        return user
    else:
        logger.warning(f"⚠️ User {user_id} is not an admin")
        raise HTTPException(status_code=403, detail="Admin access required")
|
||||
|
||||
|
||||
# ============================
|
||||
# Helper Functions
|
||||
# ============================
|
||||
@ -632,6 +646,95 @@ async def refresh_token_endpoint(
|
||||
# ============================
|
||||
# Candidate Endpoints
|
||||
# ============================
|
||||
@api_router.post("/candidates/ai")
async def create_candidate_ai(
    background_tasks: BackgroundTasks,
    user_message: ChatMessageUser = Body(...),
    admin: Candidate = Depends(get_current_admin),
    database: RedisDatabase = Depends(get_database)
):
    """Create a new candidate using AI-generated data.

    Admin-only endpoint: runs the generate_persona agent to completion,
    validates the resulting JSON as a CandidateAI, and stores it directly
    in an active state (no email verification step).

    NOTE(review): background_tasks is accepted but never used here — confirm
    whether it is needed or can be dropped from the signature.
    """
    try:
        generate_agent = agents.get_or_create_agent(
            agent_type=ChatContextType.GENERATE_PERSONA,
            prometheus_collector=prometheus_collector)

        if not generate_agent:
            logger.warning(f"⚠️ Unable to create AI generation agent.")
            return JSONResponse(
                status_code=400,
                content=create_error_response("AGENT_NOT_FOUND", "Unable to create AI generation agent")
            )

        # Drain the agent's stream; only the final message matters here.
        persona_message = None
        async for generated_message in generate_agent.generate(
            llm=llm_manager.get_llm(),
            model=defines.model,
            user_message=user_message,
            # NOTE(review): generate() declares `user: Candidate` but None is
            # passed — confirm the agent tolerates a missing user.
            user=None,
        ):
            persona_message = generated_message

        if not persona_message or persona_message.status != ChatStatusType.DONE:
            logger.error(f"❌ AI generation failed: {persona_message.content if persona_message else 'No message generated'}")
            return JSONResponse(
                status_code=500,
                content=create_error_response("AI_GENERATION_FAILED", "Failed to generate AI candidate data")
            )

        try:
            current_time = datetime.now(timezone.utc)
            # The agent's final message content is the persona as JSON.
            candidate_data = json.loads(persona_message.content)
            candidate_data.update({
                "userType": "candidate",
                "createdAt": current_time.isoformat(),
                "updatedAt": current_time.isoformat(),
                "status": "active", # Directly active for AI-generated candidates
                "isAdmin": False, # Default to non-admin
                "isAI": True, # Mark as AI-generated
            })
            candidate = CandidateAI.model_validate(candidate_data)
        except ValidationError as e:
            logger.error(f"❌ AI candidate data validation failed")
            for lines in traceback.format_exc().splitlines():
                logger.error(lines)
            # NOTE(review): persona_message.content is already a str, so
            # json.dumps() logs it JSON-quoted rather than pretty-printed —
            # confirm intent.
            logger.error(json.dumps(persona_message.content, indent=2))
            # NOTE(review): uses print() instead of logger for field errors.
            for error in e.errors():
                print(f"Field: {error['loc'][0]}, Error: {error['msg']}")
            return JSONResponse(
                status_code=400,
                content=create_error_response("AI_VALIDATION_FAILED", "AI-generated data validation failed")
            )
        except Exception as e:
            # Log the error and return a validation error response
            for lines in traceback.format_exc().splitlines():
                logger.error(lines)
            logger.error(json.dumps(persona_message.content, indent=2))
            return JSONResponse(
                status_code=400,
                content=create_error_response("AI_VALIDATION_FAILED", "AI-generated data validation failed")
            )

        logger.info(f"🤖 AI-generated candidate {candidate.username} created with email {candidate.email}")
        candidate_data = candidate.model_dump()
        # Store in database
        await database.set_candidate(candidate.id, candidate_data)

        logger.info(f"✅ AI-generated candidate created: {candidate_data['email']}")

        return create_success_response({
            "message": "AI-generated candidate created successfully",
            "candidate": candidate_data
        })

    except Exception as e:
        logger.error(traceback.format_exc())
        logger.error(f"❌ AI Candidate creation error: {e}")
        return JSONResponse(
            status_code=500,
            content=create_error_response("AI_CREATION_FAILED", "Failed to create AI-generated candidate")
        )
|
||||
|
||||
@api_router.post("/candidates")
|
||||
async def create_candidate_with_verification(
|
||||
request: CreateCandidateRequest,
|
||||
@ -662,6 +765,10 @@ async def create_candidate_with_verification(
|
||||
# Generate candidate data (but don't activate yet)
|
||||
candidate_id = str(uuid.uuid4())
|
||||
current_time = datetime.now(timezone.utc)
|
||||
all_candidates = await database.get_all_candidates()
|
||||
is_admin = False
|
||||
if len(all_candidates) == 0:
|
||||
is_admin = True
|
||||
|
||||
candidate_data = {
|
||||
"id": candidate_id,
|
||||
@ -675,6 +782,7 @@ async def create_candidate_with_verification(
|
||||
"createdAt": current_time.isoformat(),
|
||||
"updatedAt": current_time.isoformat(),
|
||||
"status": "pending", # Not active until email verified
|
||||
"isAdmin": is_admin,
|
||||
}
|
||||
|
||||
# Generate verification token
|
||||
@ -703,7 +811,7 @@ async def create_candidate_with_verification(
|
||||
logger.info(f"✅ Candidate registration initiated for: {request.email}")
|
||||
|
||||
return create_success_response({
|
||||
"message": "Registration successful! Please check your email to verify your account.",
|
||||
"message": f"Registration successful! Please check your email to verify your account. {'As the first user on this sytem, you have admin priveledges.' if is_admin else ''}",
|
||||
"email": request.email,
|
||||
"verificationRequired": True
|
||||
})
|
||||
@ -2794,6 +2902,7 @@ async def post_chat_session_message_stream(
|
||||
try:
|
||||
chat_session_data = await database.get_chat_session(user_message.session_id)
|
||||
if not chat_session_data:
|
||||
logger.info("🔗 Chat session not found for session ID: " + user_message.session_id)
|
||||
return JSONResponse(
|
||||
status_code=404,
|
||||
content=create_error_response("NOT_FOUND", "Chat session not found")
|
||||
@ -2815,6 +2924,7 @@ async def post_chat_session_message_stream(
|
||||
candidate_data = await database.get_candidate(candidate_info["id"]) if candidate_info else None
|
||||
candidate : Candidate | None = Candidate.model_validate(candidate_data) if candidate_data else None
|
||||
if not candidate:
|
||||
logger.info(f"🔗 Candidate not found for chat session {user_message.session_id} with ID {candidate_info['id']}")
|
||||
return JSONResponse(
|
||||
status_code=404,
|
||||
content=create_error_response("CANDIDATE_NOT_FOUND", "Candidate not found for this chat session")
|
||||
@ -2825,6 +2935,7 @@ async def post_chat_session_message_stream(
|
||||
# Entity automatically released when done
|
||||
chat_agent = candidate_entity.get_or_create_agent(agent_type=chat_type)
|
||||
if not chat_agent:
|
||||
logger.info(f"🔗 No chat agent found for session {user_message.session_id} with type {chat_type}")
|
||||
return JSONResponse(
|
||||
status_code=400,
|
||||
content=create_error_response("AGENT_NOT_FOUND", "No agent found for this chat type")
|
||||
|
@ -695,9 +695,10 @@ class ChromaDBGetResponse(BaseModel):
|
||||
|
||||
class ChatContext(BaseModel):
    """Describes what a chat session is about and what it may reference.

    Aliases are camelCase so the model round-trips with the frontend JSON.
    """
    type: ChatContextType
    requires_relationship: bool = Field(False, alias="requiresRelationship")
    related_entity_id: Optional[str] = Field(None, alias="relatedEntityId")
    related_entity_type: Optional[Literal["job", "candidate", "employer"]] = Field(None, alias="relatedEntityType")
    # Fix: the source carried two conflicting declarations of this field
    # (defaults None and {}). Keep one, and use default_factory so each
    # instance gets its own dict rather than a literal shared default.
    additional_context: Optional[Dict[str, Any]] = Field(default_factory=dict, alias="additionalContext")
    model_config = {
        "populate_by_name": True # Allow both field names and aliases
    }
|
||||
|
Loading…
x
Reference in New Issue
Block a user