Almost working again

This commit is contained in:
James Ketr 2025-05-28 22:50:38 -07:00
parent b5b3a1f5dc
commit 02a278736e
26 changed files with 2082 additions and 466 deletions

View File

@ -1,15 +1,14 @@
import React, { useEffect, useState, useRef, useCallback } from 'react'; import React, { useEffect, useState, useRef, useCallback } from 'react';
import { Route, Routes, useLocation, useNavigate } from 'react-router-dom'; import { Route, Routes, useLocation, useNavigate } from 'react-router-dom';
import { ThemeProvider } from '@mui/material/styles'; import { ThemeProvider } from '@mui/material/styles';
import { Box } from '@mui/material';
import { backstoryTheme } from './BackstoryTheme'; import { backstoryTheme } from './BackstoryTheme';
import { SeverityType } from 'components/Snack'; import { SeverityType } from 'components/Snack';
import { Query } from 'types/types';
import { ConversationHandle } from 'components/Conversation'; import { ConversationHandle } from 'components/Conversation';
import { UserProvider } from 'hooks/useUser'; import { UserProvider } from 'hooks/useUser';
import { CandidateRoute } from 'routes/CandidateRoute'; import { CandidateRoute } from 'routes/CandidateRoute';
import { BackstoryLayout } from 'components/layout/BackstoryLayout'; import { BackstoryLayout } from 'components/layout/BackstoryLayout';
import { ChatQuery } from 'types/types';
import './BackstoryApp.css'; import './BackstoryApp.css';
import '@fontsource/roboto/300.css'; import '@fontsource/roboto/300.css';
@ -25,7 +24,7 @@ const BackstoryApp = () => {
const setSnack = useCallback((message: string, severity?: SeverityType) => { const setSnack = useCallback((message: string, severity?: SeverityType) => {
snackRef.current?.setSnack(message, severity); snackRef.current?.setSnack(message, severity);
}, [snackRef]); }, [snackRef]);
const submitQuery = (query: Query) => { const submitQuery = (query: ChatQuery) => {
console.log(`handleSubmitChatQuery:`, query, chatRef.current ? ' sending' : 'no handler'); console.log(`handleSubmitChatQuery:`, query, chatRef.current ? ' sending' : 'no handler');
chatRef.current?.submitQuery(query); chatRef.current?.submitQuery(query);
navigate('/chat'); navigate('/chat');

View File

@ -1,16 +1,16 @@
import Box from '@mui/material/Box'; import Box from '@mui/material/Box';
import Button from '@mui/material/Button'; import Button from '@mui/material/Button';
import { Query } from "../types/types"; import { ChatQuery } from "types/types";
type ChatSubmitQueryInterface = (query: Query) => void; type ChatSubmitQueryInterface = (query: ChatQuery) => void;
interface ChatQueryInterface { interface BackstoryQueryInterface {
query: Query, query: ChatQuery,
submitQuery?: ChatSubmitQueryInterface submitQuery?: ChatSubmitQueryInterface
} }
const ChatQuery = (props : ChatQueryInterface) => { const BackstoryQuery = (props : BackstoryQueryInterface) => {
const { query, submitQuery } = props; const { query, submitQuery } = props;
if (submitQuery === undefined) { if (submitQuery === undefined) {
@ -29,11 +29,11 @@ const ChatQuery = (props : ChatQueryInterface) => {
} }
export type { export type {
ChatQueryInterface, BackstoryQueryInterface,
ChatSubmitQueryInterface, ChatSubmitQueryInterface,
}; };
export { export {
ChatQuery, BackstoryQuery,
}; };

View File

@ -1,7 +1,7 @@
import React, { ReactElement, JSXElementConstructor } from 'react'; import React, { ReactElement, JSXElementConstructor } from 'react';
import Box from '@mui/material/Box'; import Box from '@mui/material/Box';
import { SxProps, Theme } from '@mui/material'; import { SxProps, Theme } from '@mui/material';
import { ChatSubmitQueryInterface } from './ChatQuery'; import { ChatSubmitQueryInterface } from './BackstoryQuery';
import { SetSnackType } from './Snack'; import { SetSnackType } from './Snack';
interface BackstoryElementProps { interface BackstoryElementProps {

View File

@ -14,8 +14,8 @@ import { BackstoryTextField, BackstoryTextFieldRef } from 'components/BackstoryT
import { BackstoryElementProps } from './BackstoryTab'; import { BackstoryElementProps } from './BackstoryTab';
import { connectionBase } from 'utils/Global'; import { connectionBase } from 'utils/Global';
import { useUser } from "hooks/useUser"; import { useUser } from "hooks/useUser";
import { ApiClient, StreamingResponse } from 'types/api-client'; import { StreamingResponse } from 'types/api-client';
import { ChatMessage, ChatContext, ChatSession, AIParameters, Query } from 'types/types'; import { ChatMessage, ChatContext, ChatSession, ChatQuery } from 'types/types';
import { PaginatedResponse } from 'types/conversion'; import { PaginatedResponse } from 'types/conversion';
import './Conversation.css'; import './Conversation.css';
@ -29,7 +29,7 @@ const loadingMessage: ChatMessage = { ...defaultMessage, content: "Establishing
type ConversationMode = 'chat' | 'job_description' | 'resume' | 'fact_check' | 'persona'; type ConversationMode = 'chat' | 'job_description' | 'resume' | 'fact_check' | 'persona';
interface ConversationHandle { interface ConversationHandle {
submitQuery: (query: Query) => void; submitQuery: (query: ChatQuery) => void;
fetchHistory: () => void; fetchHistory: () => void;
} }
@ -124,22 +124,7 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>((props: C
} }
const createChatSession = async () => { const createChatSession = async () => {
try { try {
const aiParameters: AIParameters = { const chatContext: ChatContext = { type: "general" };
name: '',
model: 'qwen2.5',
temperature: 0.7,
topP: 1,
frequencyPenalty: 0,
presencePenalty: 0,
isDefault: true,
createdAt: new Date(),
updatedAt: new Date()
};
const chatContext: ChatContext = {
type: "general",
aiParameters
};
const response: ChatSession = await apiClient.createChatSession(chatContext); const response: ChatSession = await apiClient.createChatSession(chatContext);
setChatSession(response); setChatSession(response);
} catch (e) { } catch (e) {
@ -201,14 +186,14 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>((props: C
}, [chatSession]); }, [chatSession]);
const handleEnter = (value: string) => { const handleEnter = (value: string) => {
const query: Query = { const query: ChatQuery = {
prompt: value prompt: value
} }
processQuery(query); processQuery(query);
}; };
useImperativeHandle(ref, () => ({ useImperativeHandle(ref, () => ({
submitQuery: (query: Query) => { submitQuery: (query: ChatQuery) => {
processQuery(query); processQuery(query);
}, },
fetchHistory: () => { getChatMessages(); } fetchHistory: () => { getChatMessages(); }
@ -253,7 +238,7 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>((props: C
controllerRef.current = null; controllerRef.current = null;
}; };
const processQuery = (query: Query) => { const processQuery = (query: ChatQuery) => {
if (controllerRef.current || !chatSession || !chatSession.id) { if (controllerRef.current || !chatSession || !chatSession.id) {
return; return;
} }

View File

@ -2,14 +2,14 @@ import React from 'react';
import { MuiMarkdown } from 'mui-markdown'; import { MuiMarkdown } from 'mui-markdown';
import { useTheme } from '@mui/material/styles'; import { useTheme } from '@mui/material/styles';
import { Link } from '@mui/material'; import { Link } from '@mui/material';
import { ChatQuery } from './ChatQuery'; import { BackstoryQuery } from 'components/BackstoryQuery';
import Box from '@mui/material/Box'; import Box from '@mui/material/Box';
import JsonView from '@uiw/react-json-view'; import JsonView from '@uiw/react-json-view';
import { vscodeTheme } from '@uiw/react-json-view/vscode'; import { vscodeTheme } from '@uiw/react-json-view/vscode';
import { Mermaid } from './Mermaid'; import { Mermaid } from 'components/Mermaid';
import { Scrollable } from './Scrollable'; import { Scrollable } from 'components/Scrollable';
import { jsonrepair } from 'jsonrepair'; import { jsonrepair } from 'jsonrepair';
import { GenerateImage } from './GenerateImage'; import { GenerateImage } from 'components/GenerateImage';
import './StyledMarkdown.css'; import './StyledMarkdown.css';
import { BackstoryElementProps } from './BackstoryTab'; import { BackstoryElementProps } from './BackstoryTab';
@ -98,13 +98,13 @@ const StyledMarkdown: React.FC<StyledMarkdownProps> = (props: StyledMarkdownProp
} }
} }
}, },
ChatQuery: { BackstoryQuery: {
component: (props: { query: string }) => { component: (props: { query: string }) => {
const queryString = props.query.replace(/(\w+):/g, '"$1":'); const queryString = props.query.replace(/(\w+):/g, '"$1":');
try { try {
const query = JSON.parse(queryString); const query = JSON.parse(queryString);
return <ChatQuery submitQuery={submitQuery} query={query} /> return <BackstoryQuery submitQuery={submitQuery} query={query} />
} catch (e) { } catch (e) {
console.log("StyledMarkdown error:", queryString, e); console.log("StyledMarkdown error:", queryString, e);
return props.query; return props.query;

View File

@ -57,7 +57,7 @@ const getBackstoryDynamicRoutes = (props: BackstoryDynamicRoutesProps): ReactNod
routes.push(<Route key={`${index++}`} path="/login" element={<LoginPage />} />); routes.push(<Route key={`${index++}`} path="/login" element={<LoginPage />} />);
routes.push(<Route key={`${index++}`} path="*" element={<BetaPage />} />); routes.push(<Route key={`${index++}`} path="*" element={<BetaPage />} />);
} else { } else {
routes.push(<Route key={`${index++}`} path="/login" element={<LoginPage />} />);
routes.push(<Route key={`${index++}`} path="/logout" element={<LogoutPage />} />); routes.push(<Route key={`${index++}`} path="/logout" element={<LogoutPage />} />);
if (user.userType === 'candidate') { if (user.userType === 'candidate') {

View File

@ -42,8 +42,20 @@ const UserProvider: React.FC<UserProviderProps> = (props: UserProviderProps) =>
console.log("Guest =>", guest); console.log("Guest =>", guest);
}, [guest]); }, [guest]);
/* If the user changes to a non-null value, create a new
* apiClient with the access token */
useEffect(() => { useEffect(() => {
console.log("User => ", user); console.log("User => ", user);
if (user === null) {
return;
}
/* This apiClient will persist until the user is changed
* or logged out */
const accessToken = localStorage.getItem('accessToken');
if (!accessToken) {
throw Error("accessToken is not set for user!");
}
setApiClient(new ApiClient(accessToken));
}, [user]); }, [user]);
/* Handle logout if any consumers of UserProvider setUser to NULL */ /* Handle logout if any consumers of UserProvider setUser to NULL */
@ -55,12 +67,18 @@ const UserProvider: React.FC<UserProviderProps> = (props: UserProviderProps) =>
} }
const logout = async () => { const logout = async () => {
if (!user) { if (!activeUser) {
return; return;
} }
console.log(`Logging out ${user.email}`); console.log(`Logging out ${activeUser.email}`);
try { try {
const results = await apiClient.logout(); const accessToken = localStorage.getItem('accessToken');
const refreshToken = localStorage.getItem('refreshToken');
if (!accessToken || !refreshToken) {
setSnack("Authentication tokens are invalid.", "error");
return;
}
const results = await apiClient.logout(accessToken, refreshToken);
if (results.error) { if (results.error) {
console.error(results.error); console.error(results.error);
setSnack(results.error.message, "error") setSnack(results.error.message, "error")
@ -98,9 +116,9 @@ const UserProvider: React.FC<UserProviderProps> = (props: UserProviderProps) =>
}; };
const checkExistingAuth = () => { const checkExistingAuth = () => {
const token = localStorage.getItem('accessToken'); const accessToken = localStorage.getItem('accessToken');
const userData = localStorage.getItem('userData'); const userData = localStorage.getItem('userData');
if (token && userData) { if (accessToken && userData) {
try { try {
const user = JSON.parse(userData); const user = JSON.parse(userData);
// Convert dates back to Date objects if they're stored as strings // Convert dates back to Date objects if they're stored as strings
@ -113,7 +131,7 @@ const UserProvider: React.FC<UserProviderProps> = (props: UserProviderProps) =>
if (user.lastLogin && typeof user.lastLogin === 'string') { if (user.lastLogin && typeof user.lastLogin === 'string') {
user.lastLogin = new Date(user.lastLogin); user.lastLogin = new Date(user.lastLogin);
} }
setApiClient(new ApiClient(token)); setApiClient(new ApiClient(accessToken));
setUser(user); setUser(user);
} catch (e) { } catch (e) {
localStorage.removeItem('accessToken'); localStorage.removeItem('accessToken');

View File

@ -1,5 +1,5 @@
import React, { useState, useEffect } from 'react'; import React, { useState, useEffect } from 'react';
import { useNavigate } from 'react-router-dom'; import { useNavigate, useLocation } from 'react-router-dom';
import { import {
Box, Box,
Container, Container,
@ -35,7 +35,11 @@ const BetaPage: React.FC<BetaPageProps> = ({
const theme = useTheme(); const theme = useTheme();
const [showSparkle, setShowSparkle] = useState<boolean>(false); const [showSparkle, setShowSparkle] = useState<boolean>(false);
const navigate = useNavigate(); const navigate = useNavigate();
const location = useLocation();
if (!children) {
children = (<Box>Location: {location.pathname}</Box>);
}
console.log("BetaPage", children); console.log("BetaPage", children);
// Enhanced sparkle effect for background elements // Enhanced sparkle effect for background elements

View File

@ -6,7 +6,7 @@ import MuiMarkdown from 'mui-markdown';
import { BackstoryPageProps } from '../components/BackstoryTab'; import { BackstoryPageProps } from '../components/BackstoryTab';
import { Conversation, ConversationHandle } from '../components/Conversation'; import { Conversation, ConversationHandle } from '../components/Conversation';
import { ChatQuery } from '../components/ChatQuery'; import { BackstoryQuery } from '../components/BackstoryQuery';
import { CandidateInfo } from 'components/CandidateInfo'; import { CandidateInfo } from 'components/CandidateInfo';
import { useUser } from "../hooks/useUser"; import { useUser } from "../hooks/useUser";
@ -26,7 +26,7 @@ const ChatPage = forwardRef<ConversationHandle, BackstoryPageProps>((props: Back
setQuestions([ setQuestions([
<Box sx={{ display: "flex", flexDirection: isMobile ? "column" : "row" }}> <Box sx={{ display: "flex", flexDirection: isMobile ? "column" : "row" }}>
{candidate.questions?.map(({ question, tunables }, i: number) => {candidate.questions?.map(({ question, tunables }, i: number) =>
<ChatQuery key={i} query={{ prompt: question, tunables: tunables }} submitQuery={submitQuery} /> <BackstoryQuery key={i} query={{ prompt: question, tunables: tunables }} submitQuery={submitQuery} />
)} )}
</Box>, </Box>,
<Box sx={{ p: 1 }}> <Box sx={{ p: 1 }}>

View File

@ -19,7 +19,7 @@ import { StyledMarkdown } from 'components/StyledMarkdown';
import { Scrollable } from '../components/Scrollable'; import { Scrollable } from '../components/Scrollable';
import { Pulse } from 'components/Pulse'; import { Pulse } from 'components/Pulse';
import { StreamingResponse } from 'types/api-client'; import { StreamingResponse } from 'types/api-client';
import { ChatContext, ChatSession, AIParameters, Query } from 'types/types'; import { ChatContext, ChatSession, ChatQuery } from 'types/types';
import { useUser } from 'hooks/useUser'; import { useUser } from 'hooks/useUser';
const emptyUser: Candidate = { const emptyUser: Candidate = {
@ -72,12 +72,7 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
const createChatSession = async () => { const createChatSession = async () => {
try { try {
const aiParameters: AIParameters = { model: 'qwen2.5' }; const chatContext: ChatContext = { type: "generate_persona" };
const chatContext: ChatContext = {
type: "generate_persona",
aiParameters
};
const response: ChatSession = await apiClient.createChatSession(chatContext); const response: ChatSession = await apiClient.createChatSession(chatContext);
setChatSession(response); setChatSession(response);
setSnack(`Chat session created for generate_persona: ${response.id}`); setSnack(`Chat session created for generate_persona: ${response.id}`);
@ -90,7 +85,7 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
createChatSession(); createChatSession();
}, [chatSession, setChatSession]); }, [chatSession, setChatSession]);
const generatePersona = useCallback((query: Query) => { const generatePersona = useCallback((query: ChatQuery) => {
if (!chatSession || !chatSession.id) { if (!chatSession || !chatSession.id) {
return; return;
} }
@ -196,7 +191,7 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
if (processing) { if (processing) {
return; return;
} }
const query: Query = { const query: ChatQuery = {
prompt: value, prompt: value,
} }
generatePersona(query); generatePersona(query);

View File

@ -0,0 +1,57 @@
/* Styling for react-phone-number-input so the widget visually matches the
   outlined MUI TextFields used elsewhere on the login/register form. */

/* Match MUI's disabled-state opacity. */
.PhoneInput:disabled {
  opacity: 0.38;
}

/* .PhoneInput:not(:active):not(:focus):not(:hover) {
} */

/* NOTE(review): .PhoneInput is the flex wrapper, not the <input> itself;
   ::placeholder normally only matches the inner .PhoneInputInput element —
   confirm this rule actually takes effect in the browser. */
.PhoneInput::placeholder {
  color: rgba(46, 46, 46, 0.38);
}

/* Focus/active ring mimicking an outlined field's focused border. */
.PhoneInput:focus,
.PhoneInput:active {
  outline: 2px solid black;
}

/* Hover affordance only when the control is not already focused/active. */
.PhoneInput:hover:not(:active):not(:focus) {
  outline: 1px solid black;
}

/* Reset the native input so it inherits typography, mirroring the styles
   MUI applies to its own InputBase element. */
.PhoneInputInput {
  font: inherit;
  letter-spacing: inherit;
  color: currentColor;
  border: 0;
  box-sizing: content-box;
  background: none;
  height: 1.4375em;
  margin: 0;
  -webkit-tap-highlight-color: transparent;
  display: block;
  min-width: 0;
  width: 100%;
  -webkit-animation-name: mui-auto-fill-cancel;
  animation-name: mui-auto-fill-cancel;
  -webkit-animation-duration: 10ms;
  animation-duration: 10ms;
  /* Padding for a medium outlined MUI input. The original rule also
     declared `padding: 4px 0 5px;` earlier in this block, which was dead
     (fully overridden by this declaration) and has been removed. */
  padding: 16.5px 14px;
}

.PhoneInputCountry {
  min-width: 64px;
  justify-content: center;
}

.PhoneInputCountry:focus,
.PhoneInputCountry:active {
  outline: 2px solid black;
}

/* Base (unfocused) appearance of the whole control. Declared after the
   pseudo-class rules above, but those win on specificity regardless. */
.PhoneInput {
  display: flex;
  outline: 1px solid rgba(46, 46, 46, 0.38);
  border: none;
}

View File

@ -0,0 +1,453 @@
import React, { useState, useEffect } from 'react';
import {
Box,
Container,
Paper,
TextField,
Button,
Typography,
Grid,
Alert,
CircularProgress,
Tabs,
Tab,
AppBar,
Toolbar,
Card,
CardContent,
Divider,
Avatar
} from '@mui/material';
import { Person, PersonAdd, AccountCircle, ExitToApp } from '@mui/icons-material';
import 'react-phone-number-input/style.css';
import PhoneInput from 'react-phone-number-input';
import { E164Number } from 'libphonenumber-js/core';
import './LoginPage.css';
import { ApiClient } from 'types/api-client';
import { useUser } from 'hooks/useUser';
// Import conversion utilities
import {
formatApiRequest,
parseApiResponse,
handleApiResponse,
extractApiData,
isSuccessResponse,
debugConversion,
type ApiResponse
} from 'types/conversion';
import {
AuthResponse, User, Guest, Candidate
} from 'types/types'
import { useNavigate } from 'react-router-dom';
/**
 * Payload for the sign-in form. `login` accepts either a username or an
 * email address (validated client-side by validateInput in LoginPage).
 */
interface LoginRequest {
  login: string;
  password: string;
}

/**
 * Payload backing the registration form. `phone` is optional and is kept
 * in E.164 form via the PhoneInput widget.
 */
interface RegisterRequest {
  username: string;
  email: string;
  firstName: string;
  lastName: string;
  password: string;
  phone?: string;
}

// Module-level, token-less client — presumably only used for the
// pre-authentication calls on this page (login / createCandidate);
// authenticated clients appear to be constructed elsewhere after login.
const apiClient = new ApiClient();
/**
 * Combined login / registration page.
 *
 * Behavior visible in this component:
 *  - When `user` from useUser() is non-null, renders a read-only profile
 *    card instead of the forms.
 *  - Otherwise renders a two-tab form (Sign In / Register) plus an
 *    informational card about the active guest session, if any.
 *  - On successful login, stores accessToken/refreshToken/userData in
 *    localStorage, then navigates home and publishes the user via setUser.
 */
const LoginPage: React.FC = () => {
  const navigate = useNavigate();
  const { user, setUser, guest } = useUser();
  const [tabValue, setTabValue] = useState(0);
  const [loading, setLoading] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const [success, setSuccess] = useState<string | null>(null);
  // Phone number held separately in E164Number form; synced into
  // registerForm by the effect below.
  const [phone, setPhone] = useState<E164Number | null>(null);
  // Display name for the profile card: candidates show their username,
  // everyone else falls back to email.
  const name = (user?.userType === 'candidate' ? (user as Candidate).username : user?.email) || '';

  // Login form state
  const [loginForm, setLoginForm] = useState<LoginRequest>({
    login: '',
    password: ''
  });

  // Register form state
  const [registerForm, setRegisterForm] = useState<RegisterRequest>({
    username: '',
    email: '',
    firstName: '',
    lastName: '',
    password: '',
    phone: ''
  });

  // Copy the PhoneInput value into registerForm. The `phone !==
  // registerForm.phone` guard stops this effect from re-firing after its
  // own setRegisterForm call (registerForm is in the dependency list).
  useEffect(() => {
    if (phone !== registerForm.phone && phone) {
      console.log({ phone });
      setRegisterForm({ ...registerForm, phone });
    }
  }, [phone, registerForm]);

  /**
   * Submit the sign-in form. Persists the returned tokens and user data
   * to localStorage, then navigates to '/' and publishes the user.
   * Errors are surfaced via the `error` state.
   */
  const handleLogin = async (e: React.FormEvent) => {
    e.preventDefault();
    setLoading(true);
    setError(null);
    setSuccess(null);
    try {
      const authResponse = await apiClient.login(loginForm.login, loginForm.password)
      debugConversion(authResponse, 'Login Response');
      // Store tokens in localStorage
      localStorage.setItem('accessToken', authResponse.accessToken);
      localStorage.setItem('refreshToken', authResponse.refreshToken);
      localStorage.setItem('userData', JSON.stringify(authResponse.user));
      setSuccess('Login successful!');
      // NOTE(review): navigate('/') runs before setUser — confirm the
      // post-navigation page does not depend on `user` being set already.
      navigate('/');
      setUser(authResponse.user);
      // Clear form
      setLoginForm({ login: '', password: '' });
    } catch (err) {
      console.error('Login error:', err);
      setError(err instanceof Error ? err.message : 'Login failed');
    } finally {
      setLoading(false);
    }
  };

  /**
   * Submit the registration form: builds a minimal Candidate record from
   * the form fields and sends it via apiClient.createCandidate, then
   * clears the form and switches back to the login tab.
   */
  const handleRegister = async (e: React.FormEvent) => {
    e.preventDefault();
    setLoading(true);
    setError(null);
    setSuccess(null);
    try {
      // NOTE(review): registerForm.password is collected by the form but
      // is not part of this Candidate payload — confirm how the password
      // reaches the backend (separate call? server-side invite flow?).
      const candidate: Candidate = {
        username: registerForm.username,
        email: registerForm.email,
        firstName: registerForm.firstName,
        lastName: registerForm.lastName,
        fullName: `${registerForm.firstName} ${registerForm.lastName}`,
        phone: registerForm.phone || undefined,
        userType: 'candidate',
        status: 'active',
        createdAt: new Date(),
        updatedAt: new Date(),
        skills: [],
        experience: [],
        education: [],
        preferredJobTypes: [],
        languages: [],
        certifications: [],
        location: {
          city: '',
          country: '',
          remote: true
        }
      };
      const result = await apiClient.createCandidate(candidate);
      debugConversion(result, 'Registration Response');
      setSuccess('Registration successful! You can now login.');
      // Clear form and switch to login tab
      setRegisterForm({
        username: '',
        email: '',
        firstName: '',
        lastName: '',
        password: '',
        phone: ''
      });
      setTabValue(0);
    } catch (err) {
      console.error('Registration error:', err);
      setError(err instanceof Error ? err.message : 'Registration failed');
    } finally {
      setLoading(false);
    }
  };

  // Switching tabs clears any stale success/error feedback.
  const handleTabChange = (event: React.SyntheticEvent, newValue: number) => {
    setTabValue(newValue);
    setError(null);
    setSuccess(null);
  };

  // If user is logged in, show their profile
  if (user) {
    return (
      <Container maxWidth="md" sx={{ mt: 4 }}>
        <Card elevation={3}>
          <CardContent>
            <Box sx={{ display: 'flex', alignItems: 'center', mb: 3 }}>
              <Avatar sx={{ mr: 2, bgcolor: 'primary.main' }}>
                <AccountCircle />
              </Avatar>
              <Typography variant="h4" component="h1">
                User Profile
              </Typography>
            </Box>
            <Divider sx={{ mb: 3 }} />
            <Grid container spacing={3}>
              <Grid size={{ xs: 12, md: 6 }}>
                <Typography variant="body1" sx={{ mb: 1 }}>
                  <strong>Username:</strong> {name}
                </Typography>
              </Grid>
              <Grid size={{ xs: 12, md: 6 }}>
                <Typography variant="body1" sx={{ mb: 1 }}>
                  <strong>Email:</strong> {user.email}
                </Typography>
              </Grid>
              <Grid size={{ xs: 12, md: 6 }}>
                <Typography variant="body1" sx={{ mb: 1 }}>
                  <strong>Status:</strong> {user.status}
                </Typography>
              </Grid>
              <Grid size={{ xs: 12, md: 6 }}>
                <Typography variant="body1" sx={{ mb: 1 }}>
                  <strong>Phone:</strong> {user.phone || 'Not provided'}
                </Typography>
              </Grid>
              <Grid size={{ xs: 12, md: 6 }}>
                <Typography variant="body1" sx={{ mb: 1 }}>
                  <strong>Last Login:</strong> {
                    user.lastLogin
                      ? user.lastLogin.toLocaleString()
                      : 'N/A'
                  }
                </Typography>
              </Grid>
              <Grid size={{ xs: 12, md: 6 }}>
                <Typography variant="body1" sx={{ mb: 1 }}>
                  <strong>Member Since:</strong> {user.createdAt.toLocaleDateString()}
                </Typography>
              </Grid>
            </Grid>
          </CardContent>
        </Card>
      </Container>
    );
  }

  /**
   * Validate the combined username-or-email login field.
   * Returns '' (empty string) when valid, otherwise a human-readable
   * message. Note '' is falsy, so the error Alert stays hidden when valid.
   */
  const validateInput = (value: string) => {
    if (!value) return 'This field is required';
    // Username: alphanumeric, 3-20 characters, no @
    const usernameRegex = /^[a-zA-Z0-9]{3,20}$/;
    // Email: basic email format
    const emailRegex = /^[^\s@]+@[^\s@]+\.[^\s@]+$/;
    if (usernameRegex.test(value)) return '';
    if (emailRegex.test(value)) return '';
    return 'Enter a valid username (3-20 alphanumeric characters) or email';
  };

  // Live-validate the login identifier as the user types.
  const handleLoginChange = (event: React.ChangeEvent<HTMLInputElement>) => {
    const { value } = event.target;
    setLoginForm({ ...loginForm, login: value });
    setError(validateInput(value));
  };

  // Logged-out view: guest-session card + tabbed login/register forms.
  return (
    <Container maxWidth="sm" sx={{ mt: 4 }}>
      <Paper elevation={3} sx={{ p: 4 }}>
        <Typography variant="h4" component="h1" gutterBottom align="center" color="primary">
          Backstory
        </Typography>
        {guest && (
          <Card sx={{ mb: 3, bgcolor: 'grey.50' }} elevation={1}>
            <CardContent>
              <Typography variant="h6" gutterBottom color="primary">
                Guest Session Active
              </Typography>
              <Typography variant="body2" color="text.secondary" sx={{ mb: 0.5 }}>
                Session ID: {guest.sessionId}
              </Typography>
              <Typography variant="body2" color="text.secondary">
                Created: {guest.createdAt.toLocaleString()}
              </Typography>
            </CardContent>
          </Card>
        )}
        <Box sx={{ borderBottom: 1, borderColor: 'divider', mb: 3 }}>
          <Tabs value={tabValue} onChange={handleTabChange} centered>
            <Tab icon={<Person />} label="Login" />
            <Tab icon={<PersonAdd />} label="Register" />
          </Tabs>
        </Box>
        {error && (
          <Alert severity="error" sx={{ mb: 2 }}>
            {error}
          </Alert>
        )}
        {success && (
          <Alert severity="success" sx={{ mb: 2 }}>
            {success}
          </Alert>
        )}
        {tabValue === 0 && (
          <Box component="form" onSubmit={handleLogin}>
            <Typography variant="h5" gutterBottom>
              Sign In
            </Typography>
            <TextField
              fullWidth
              label="Username or Email"
              type="text"
              value={loginForm.login}
              onChange={handleLoginChange}
              margin="normal"
              required
              disabled={loading}
              variant="outlined"
              placeholder="Enter username or email"
            />
            <TextField
              fullWidth
              label="Password"
              type="password"
              value={loginForm.password}
              onChange={(e) => setLoginForm({ ...loginForm, password: e.target.value })}
              margin="normal"
              required
              disabled={loading}
              variant="outlined"
              autoComplete='current-password'
            />
            <Button
              type="submit"
              fullWidth
              variant="contained"
              sx={{ mt: 3, mb: 2 }}
              disabled={loading}
              startIcon={loading ? <CircularProgress size={20} color="inherit" /> : <Person />}
            >
              {loading ? 'Signing In...' : 'Sign In'}
            </Button>
          </Box>
        )}
        {tabValue === 1 && (
          <Box component="form" onSubmit={handleRegister}>
            <Typography variant="h5" gutterBottom>
              Create Account
            </Typography>
            <Grid container spacing={2} sx={{ mb: 2 }}>
              <Grid size={{ xs: 12, sm: 6 }}>
                <TextField
                  fullWidth
                  label="First Name"
                  value={registerForm.firstName}
                  onChange={(e) => setRegisterForm({ ...registerForm, firstName: e.target.value })}
                  required
                  disabled={loading}
                  variant="outlined"
                />
              </Grid>
              <Grid size={{ xs: 12, sm: 6 }}>
                <TextField
                  fullWidth
                  label="Last Name"
                  value={registerForm.lastName}
                  onChange={(e) => setRegisterForm({ ...registerForm, lastName: e.target.value })}
                  required
                  disabled={loading}
                  variant="outlined"
                />
              </Grid>
            </Grid>
            <TextField
              fullWidth
              label="Username"
              value={registerForm.username}
              onChange={(e) => setRegisterForm({ ...registerForm, username: e.target.value })}
              margin="normal"
              required
              disabled={loading}
              variant="outlined"
            />
            <TextField
              fullWidth
              label="Email"
              type="email"
              value={registerForm.email}
              onChange={(e) => setRegisterForm({ ...registerForm, email: e.target.value })}
              margin="normal"
              required
              disabled={loading}
              variant="outlined"
            />
            <PhoneInput
              label="Phone (Optional)"
              placeholder="Enter phone number"
              defaultCountry='US'
              value={registerForm.phone}
              disabled={loading}
              onChange={(v) => setPhone(v as E164Number)} />
            {/* <TextField
              fullWidth
              label="Phone (Optional)"
              type="tel"
              value={registerForm.phone}
              onChange={(e) => setRegisterForm({ ...registerForm, phone: e.target.value })}
              margin="normal"
              disabled={loading}
              variant="outlined"
            /> */}
            <TextField
              fullWidth
              label="Password"
              type="password"
              value={registerForm.password}
              onChange={(e) => setRegisterForm({ ...registerForm, password: e.target.value })}
              margin="normal"
              required
              disabled={loading}
              variant="outlined"
            />
            <Button
              type="submit"
              fullWidth
              variant="contained"
              sx={{ mt: 3, mb: 2 }}
              disabled={loading}
              startIcon={loading ? <CircularProgress size={20} color="inherit" /> : <PersonAdd />}
            >
              {loading ? 'Creating Account...' : 'Create Account'}
            </Button>
          </Box>
        )}
      </Paper>
    </Container>
  );
};

export { LoginPage };

View File

@ -6,11 +6,11 @@ import {
} from '@mui/material'; } from '@mui/material';
import { SxProps } from '@mui/material'; import { SxProps } from '@mui/material';
import { ChatQuery } from '../components/ChatQuery'; import { BackstoryQuery } from 'components/BackstoryQuery';
import { MessageList, BackstoryMessage } from '../components/Message'; import { MessageList, BackstoryMessage } from 'components/Message';
import { Conversation } from '../components/Conversation'; import { Conversation } from 'components/Conversation';
import { BackstoryPageProps } from '../components/BackstoryTab'; import { BackstoryPageProps } from 'components/BackstoryTab';
import { Query } from "../types/types"; import { ChatQuery } from "types/types";
import './ResumeBuilderPage.css'; import './ResumeBuilderPage.css';
@ -43,17 +43,17 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
setActiveTab(newValue); setActiveTab(newValue);
}; };
const handleJobQuery = (query: Query) => { const handleJobQuery = (query: ChatQuery) => {
console.log(`handleJobQuery: ${query.prompt} -- `, jobConversationRef.current ? ' sending' : 'no handler'); console.log(`handleJobQuery: ${query.prompt} -- `, jobConversationRef.current ? ' sending' : 'no handler');
jobConversationRef.current?.submitQuery(query); jobConversationRef.current?.submitQuery(query);
}; };
const handleResumeQuery = (query: Query) => { const handleResumeQuery = (query: ChatQuery) => {
console.log(`handleResumeQuery: ${query.prompt} -- `, resumeConversationRef.current ? ' sending' : 'no handler'); console.log(`handleResumeQuery: ${query.prompt} -- `, resumeConversationRef.current ? ' sending' : 'no handler');
resumeConversationRef.current?.submitQuery(query); resumeConversationRef.current?.submitQuery(query);
}; };
const handleFactsQuery = (query: Query) => { const handleFactsQuery = (query: ChatQuery) => {
console.log(`handleFactsQuery: ${query.prompt} -- `, factsConversationRef.current ? ' sending' : 'no handler'); console.log(`handleFactsQuery: ${query.prompt} -- `, factsConversationRef.current ? ' sending' : 'no handler');
factsConversationRef.current?.submitQuery(query); factsConversationRef.current?.submitQuery(query);
}; };
@ -202,8 +202,8 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
// console.log('renderJobDescriptionView'); // console.log('renderJobDescriptionView');
// const jobDescriptionQuestions = [ // const jobDescriptionQuestions = [
// <Box sx={{ display: "flex", flexDirection: "column" }}> // <Box sx={{ display: "flex", flexDirection: "column" }}>
// <ChatQuery query={{ prompt: "What are the key skills necessary for this position?", tunables: { enableTools: false } }} submitQuery={handleJobQuery} /> // <BackstoryQuery query={{ prompt: "What are the key skills necessary for this position?", tunables: { enableTools: false } }} submitQuery={handleJobQuery} />
// <ChatQuery query={{ prompt: "How much should this position pay (accounting for inflation)?", tunables: { enableTools: false } }} submitQuery={handleJobQuery} /> // <BackstoryQuery query={{ prompt: "How much should this position pay (accounting for inflation)?", tunables: { enableTools: false } }} submitQuery={handleJobQuery} />
// </Box>, // </Box>,
// ]; // ];
@ -213,7 +213,7 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
// 1. **Job Analysis**: LLM extracts requirements from '\`Job Description\`' to generate a list of desired '\`Skills\`'. // 1. **Job Analysis**: LLM extracts requirements from '\`Job Description\`' to generate a list of desired '\`Skills\`'.
// 2. **Candidate Analysis**: LLM determines candidate qualifications by performing skill assessments. // 2. **Candidate Analysis**: LLM determines candidate qualifications by performing skill assessments.
// For each '\`Skill\`' from **Job Analysis** phase: // For each '\`Skill\`' from **Job Analysis** phase:
// 1. **RAG**: Retrieval Augmented Generation collection is queried for context related content for each '\`Skill\`'. // 1. **RAG**: Retrieval Augmented Generation collection is queried for context related content for each '\`Skill\`'.
@ -274,8 +274,8 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
// const renderResumeView = useCallback((sx?: SxProps) => { // const renderResumeView = useCallback((sx?: SxProps) => {
// const resumeQuestions = [ // const resumeQuestions = [
// <Box sx={{ display: "flex", flexDirection: "column" }}> // <Box sx={{ display: "flex", flexDirection: "column" }}>
// <ChatQuery query={{ prompt: "Is this resume a good fit for the provided job description?", tunables: { enableTools: false } }} submitQuery={handleResumeQuery} /> // <BackstoryQuery query={{ prompt: "Is this resume a good fit for the provided job description?", tunables: { enableTools: false } }} submitQuery={handleResumeQuery} />
// <ChatQuery query={{ prompt: "Provide a more concise resume.", tunables: { enableTools: false } }} submitQuery={handleResumeQuery} /> // <BackstoryQuery query={{ prompt: "Provide a more concise resume.", tunables: { enableTools: false } }} submitQuery={handleResumeQuery} />
// </Box>, // </Box>,
// ]; // ];
@ -323,7 +323,7 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
// const renderFactCheckView = useCallback((sx?: SxProps) => { // const renderFactCheckView = useCallback((sx?: SxProps) => {
// const factsQuestions = [ // const factsQuestions = [
// <Box sx={{ display: "flex", flexDirection: "column" }}> // <Box sx={{ display: "flex", flexDirection: "column" }}>
// <ChatQuery query={{ prompt: "Rewrite the resume to address any discrepancies.", tunables: { enableTools: false } }} submitQuery={handleFactsQuery} /> // <BackstoryQuery query={{ prompt: "Rewrite the resume to address any discrepancies.", tunables: { enableTools: false } }} submitQuery={handleFactsQuery} />
// </Box>, // </Box>,
// ]; // ];

View File

@ -36,7 +36,7 @@ const CandidateRoute: React.FC<CandidateRouteProps> = (props: CandidateRouteProp
} }
getCandidate(username); getCandidate(username);
}, [candidate, username, setCandidate, navigate, setSnack]); }, [candidate, username, setCandidate, navigate, setSnack, apiClient]);
if (candidate === null) { if (candidate === null) {
return (<Box> return (<Box>

View File

@ -59,7 +59,7 @@ class ApiClient {
private baseUrl: string; private baseUrl: string;
private defaultHeaders: Record<string, string>; private defaultHeaders: Record<string, string>;
constructor(authToken?: string) { constructor(accessToken?: string) {
const loc = window.location; const loc = window.location;
if (!loc.host.match(/.*battle-linux.*/)) { if (!loc.host.match(/.*battle-linux.*/)) {
this.baseUrl = loc.protocol + "//" + loc.host + "/api/1.0"; this.baseUrl = loc.protocol + "//" + loc.host + "/api/1.0";
@ -68,7 +68,7 @@ class ApiClient {
} }
this.defaultHeaders = { this.defaultHeaders = {
'Content-Type': 'application/json', 'Content-Type': 'application/json',
...(authToken && { 'Authorization': `Bearer ${authToken}` }) ...(accessToken && { 'Authorization': `Bearer ${accessToken}` })
}; };
} }
@ -86,10 +86,12 @@ class ApiClient {
return handleApiResponse<Types.AuthResponse>(response); return handleApiResponse<Types.AuthResponse>(response);
} }
async logout(): Promise<Types.ApiResponse> { async logout(accessToken: string, refreshToken: string): Promise<Types.ApiResponse> {
console.log(this.defaultHeaders);
const response = await fetch(`${this.baseUrl}/auth/logout`, { const response = await fetch(`${this.baseUrl}/auth/logout`, {
method: 'POST', method: 'POST',
headers: this.defaultHeaders, headers: this.defaultHeaders,
body: JSON.stringify(formatApiRequest({ accessToken, refreshToken }))
}); });
return handleApiResponse<Types.ApiResponse>(response); return handleApiResponse<Types.ApiResponse>(response);
@ -324,11 +326,11 @@ class ApiClient {
/** /**
* Send message with standard response (non-streaming) * Send message with standard response (non-streaming)
*/ */
async sendMessage(sessionId: string, query: Types.Query): Promise<Types.ChatMessage> { async sendMessage(sessionId: string, query: Types.ChatQuery): Promise<Types.ChatMessage> {
const response = await fetch(`${this.baseUrl}/chat/sessions/${sessionId}/messages`, { const response = await fetch(`${this.baseUrl}/chat/sessions/${sessionId}/messages`, {
method: 'POST', method: 'POST',
headers: this.defaultHeaders, headers: this.defaultHeaders,
body: JSON.stringify(formatApiRequest({ query })) body: JSON.stringify(formatApiRequest({query}))
}); });
return handleApiResponse<Types.ChatMessage>(response); return handleApiResponse<Types.ChatMessage>(response);
@ -339,7 +341,7 @@ class ApiClient {
*/ */
sendMessageStream( sendMessageStream(
sessionId: string, sessionId: string,
query: Types.Query, query: Types.ChatQuery,
options: StreamingOptions = {} options: StreamingOptions = {}
): StreamingResponse { ): StreamingResponse {
const abortController = new AbortController(); const abortController = new AbortController();
@ -488,7 +490,7 @@ class ApiClient {
*/ */
async sendMessageAuto( async sendMessageAuto(
sessionId: string, sessionId: string,
query: Types.Query, query: Types.ChatQuery,
options?: StreamingOptions options?: StreamingOptions
): Promise<Types.ChatMessage> { ): Promise<Types.ChatMessage> {
// If streaming options are provided, use streaming // If streaming options are provided, use streaming
@ -512,36 +514,6 @@ class ApiClient {
return handlePaginatedApiResponse<Types.ChatMessage>(response); return handlePaginatedApiResponse<Types.ChatMessage>(response);
} }
// ============================
// AI Configuration Methods
// ============================
async createAIParameters(params: Omit<Types.AIParameters, 'id' | 'createdAt' | 'updatedAt'>): Promise<Types.AIParameters> {
const response = await fetch(`${this.baseUrl}/ai/parameters`, {
method: 'POST',
headers: this.defaultHeaders,
body: JSON.stringify(formatApiRequest(params))
});
return handleApiResponse<Types.AIParameters>(response);
}
async getAIParameters(id: string): Promise<Types.AIParameters> {
const response = await fetch(`${this.baseUrl}/ai/parameters/${id}`, {
headers: this.defaultHeaders
});
return handleApiResponse<Types.AIParameters>(response);
}
async getUserAIParameters(userId: string): Promise<Types.AIParameters[]> {
const response = await fetch(`${this.baseUrl}/users/${userId}/ai/parameters`, {
headers: this.defaultHeaders
});
return handleApiResponse<Types.AIParameters[]>(response);
}
// ============================ // ============================
// Error Handling Helper // Error Handling Helper
// ============================ // ============================
@ -589,7 +561,7 @@ export function useStreamingChat(sessionId: string) {
const apiClient = useApiClient(); const apiClient = useApiClient();
const streamingRef = useRef<StreamingResponse | null>(null); const streamingRef = useRef<StreamingResponse | null>(null);
const sendMessage = useCallback(async (query: Types.Query) => { const sendMessage = useCallback(async (query: Types.ChatQuery) => {
setError(null); setError(null);
setIsStreaming(true); setIsStreaming(true);
setCurrentMessage(null); setCurrentMessage(null);

View File

@ -1,6 +1,6 @@
// Generated TypeScript types from Pydantic models // Generated TypeScript types from Pydantic models
// Source: src/backend/models.py // Source: src/backend/models.py
// Generated on: 2025-05-29T02:05:50.622601 // Generated on: 2025-05-29T05:47:25.809967
// DO NOT EDIT MANUALLY - This file is auto-generated // DO NOT EDIT MANUALLY - This file is auto-generated
// ============================ // ============================
@ -17,7 +17,7 @@ export type ChatContextType = "job_search" | "candidate_screening" | "interview_
export type ChatSenderType = "user" | "ai" | "system"; export type ChatSenderType = "user" | "ai" | "system";
export type ChatStatusType = "partial" | "done" | "streaming" | "thinking" | "error"; export type ChatStatusType = "preparing" | "thinking" | "partial" | "streaming" | "done" | "error";
export type ColorBlindMode = "protanopia" | "deuteranopia" | "tritanopia" | "none"; export type ColorBlindMode = "protanopia" | "deuteranopia" | "tritanopia" | "none";
@ -63,141 +63,123 @@ export type VectorStoreType = "pinecone" | "qdrant" | "faiss" | "milvus" | "weav
// Interfaces // Interfaces
// ============================ // ============================
export interface AIParameters {
id?: string;
userId?: string;
name?: string;
description?: string;
model?: "qwen2.5" | "flux-schnell";
temperature?: number;
maxTokens?: number;
topP?: number;
frequencyPenalty?: number;
presencePenalty?: number;
systemPrompt?: string;
isDefault?: boolean;
createdAt?: Date;
updatedAt?: Date;
customModelConfig?: Record<string, any>;
}
export interface AccessibilitySettings { export interface AccessibilitySettings {
fontSize?: "small" | "medium" | "large"; fontSize: "small" | "medium" | "large";
highContrast?: boolean; highContrast: boolean;
reduceMotion?: boolean; reduceMotion: boolean;
screenReader?: boolean; screenReader: boolean;
colorBlindMode?: "protanopia" | "deuteranopia" | "tritanopia" | "none"; colorBlindMode?: "protanopia" | "deuteranopia" | "tritanopia" | "none";
} }
export interface Analytics { export interface Analytics {
id?: string; id?: string;
entityType?: "job" | "candidate" | "chat" | "system" | "employer"; entityType: "job" | "candidate" | "chat" | "system" | "employer";
entityId?: string; entityId: string;
metricType?: string; metricType: string;
value?: number; value: number;
timestamp?: Date; timestamp: Date;
dimensions?: Record<string, any>; dimensions?: Record<string, any>;
segment?: string; segment?: string;
} }
export interface ApiResponse { export interface ApiResponse {
success?: boolean; success: boolean;
data?: any; data?: any;
error?: ErrorDetail; error?: ErrorDetail;
meta?: Record<string, any>; meta?: Record<string, any>;
} }
export interface ApplicationDecision { export interface ApplicationDecision {
status?: "accepted" | "rejected"; status: "accepted" | "rejected";
reason?: string; reason?: string;
date?: Date; date: Date;
by?: string; by: string;
} }
export interface Attachment { export interface Attachment {
id?: string; id?: string;
fileName?: string; fileName: string;
fileType?: string; fileType: string;
fileSize?: number; fileSize: number;
fileUrl?: string; fileUrl: string;
uploadedAt?: Date; uploadedAt: Date;
isProcessed?: boolean; isProcessed: boolean;
processingResult?: any; processingResult?: any;
thumbnailUrl?: string; thumbnailUrl?: string;
} }
export interface AuthResponse { export interface AuthResponse {
accessToken?: string; accessToken: string;
refreshToken?: string; refreshToken: string;
user?: any; user: any;
expiresAt?: number; expiresAt: number;
} }
export interface Authentication { export interface Authentication {
userId?: string; userId: string;
passwordHash?: string; passwordHash: string;
salt?: string; salt: string;
refreshTokens?: Array<RefreshToken>; refreshTokens: Array<RefreshToken>;
resetPasswordToken?: string; resetPasswordToken?: string;
resetPasswordExpiry?: Date; resetPasswordExpiry?: Date;
lastPasswordChange?: Date; lastPasswordChange: Date;
mfaEnabled?: boolean; mfaEnabled: boolean;
mfaMethod?: "app" | "sms" | "email"; mfaMethod?: "app" | "sms" | "email";
mfaSecret?: string; mfaSecret?: string;
loginAttempts?: number; loginAttempts: number;
lockedUntil?: Date; lockedUntil?: Date;
} }
export interface BaseUser { export interface BaseUser {
id?: string; id?: string;
email?: string; email: string;
phone?: string; phone?: string;
createdAt?: Date; createdAt: Date;
updatedAt?: Date; updatedAt: Date;
lastLogin?: Date; lastLogin?: Date;
profileImage?: string; profileImage?: string;
status?: "active" | "inactive" | "pending" | "banned"; status: "active" | "inactive" | "pending" | "banned";
} }
export interface BaseUserWithType { export interface BaseUserWithType {
id?: string; id?: string;
email?: string; email: string;
phone?: string; phone?: string;
createdAt?: Date; createdAt: Date;
updatedAt?: Date; updatedAt: Date;
lastLogin?: Date; lastLogin?: Date;
profileImage?: string; profileImage?: string;
status?: "active" | "inactive" | "pending" | "banned"; status: "active" | "inactive" | "pending" | "banned";
userType?: "candidate" | "employer" | "guest"; userType: "candidate" | "employer" | "guest";
} }
export interface Candidate { export interface Candidate {
id?: string; id?: string;
email?: string; email: string;
phone?: string; phone?: string;
createdAt?: Date; createdAt: Date;
updatedAt?: Date; updatedAt: Date;
lastLogin?: Date; lastLogin?: Date;
profileImage?: string; profileImage?: string;
status?: "active" | "inactive" | "pending" | "banned"; status: "active" | "inactive" | "pending" | "banned";
userType?: "candidate"; userType?: "candidate";
username?: string; username: string;
firstName?: string; firstName: string;
lastName?: string; lastName: string;
fullName?: string; fullName: string;
description?: string; description?: string;
resume?: string; resume?: string;
skills?: Array<Skill>; skills: Array<Skill>;
experience?: Array<WorkExperience>; experience: Array<WorkExperience>;
questions?: Array<CandidateQuestion>; questions?: Array<CandidateQuestion>;
education?: Array<Education>; education: Array<Education>;
preferredJobTypes?: Array<"full-time" | "part-time" | "contract" | "internship" | "freelance">; preferredJobTypes: Array<"full-time" | "part-time" | "contract" | "internship" | "freelance">;
desiredSalary?: DesiredSalary; desiredSalary?: DesiredSalary;
location?: Location; location: Location;
availabilityDate?: Date; availabilityDate?: Date;
summary?: string; summary?: string;
languages?: Array<Language>; languages: Array<Language>;
certifications?: Array<Certification>; certifications: Array<Certification>;
jobApplications?: Array<JobApplication>; jobApplications?: Array<JobApplication>;
hasProfile?: boolean; hasProfile?: boolean;
age?: number; age?: number;
@ -206,24 +188,24 @@ export interface Candidate {
} }
export interface CandidateContact { export interface CandidateContact {
email?: string; email: string;
phone?: string; phone?: string;
} }
export interface CandidateListResponse { export interface CandidateListResponse {
success?: boolean; success: boolean;
data?: Array<Candidate>; data?: Array<Candidate>;
error?: ErrorDetail; error?: ErrorDetail;
meta?: Record<string, any>; meta?: Record<string, any>;
} }
export interface CandidateQuestion { export interface CandidateQuestion {
question?: string; question: string;
tunables?: Tunables; tunables?: Tunables;
} }
export interface CandidateResponse { export interface CandidateResponse {
success?: boolean; success: boolean;
data?: Candidate; data?: Candidate;
error?: ErrorDetail; error?: ErrorDetail;
meta?: Record<string, any>; meta?: Record<string, any>;
@ -231,35 +213,64 @@ export interface CandidateResponse {
export interface Certification { export interface Certification {
id?: string; id?: string;
name?: string; name: string;
issuingOrganization?: string; issuingOrganization: string;
issueDate?: Date; issueDate: Date;
expirationDate?: Date; expirationDate?: Date;
credentialId?: string; credentialId?: string;
credentialUrl?: string; credentialUrl?: string;
} }
export interface ChatContext { export interface ChatContext {
type?: "job_search" | "candidate_screening" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile"; type: "job_search" | "candidate_screening" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile";
relatedEntityId?: string; relatedEntityId?: string;
relatedEntityType?: "job" | "candidate" | "employer"; relatedEntityType?: "job" | "candidate" | "employer";
aiParameters?: AIParameters;
additionalContext?: Record<string, any>; additionalContext?: Record<string, any>;
} }
export interface ChatMessage { export interface ChatMessage {
id?: string; id?: string;
sessionId?: string; sessionId: string;
status?: "partial" | "done" | "streaming" | "thinking" | "error"; status: "preparing" | "thinking" | "partial" | "streaming" | "done" | "error";
sender?: "user" | "ai" | "system"; sender: "user" | "ai" | "system";
senderId?: string; senderId?: string;
prompt?: string;
content?: string; content?: string;
timestamp?: Date; chunk?: string;
attachments?: Array<Attachment>; timestamp: Date;
reactions?: Array<MessageReaction>;
isEdited?: boolean; isEdited?: boolean;
editHistory?: Array<EditHistory>; metadata?: ChatMessageMetaData;
metadata?: Record<string, any>; }
export interface ChatMessageMetaData {
model?: "qwen2.5" | "flux-schnell";
temperature?: number;
maxTokens?: number;
topP?: number;
frequencyPenalty?: number;
presencePenalty?: number;
stopSequences?: Array<string>;
tunables?: Tunables;
rag?: Array<ChromaDBGetResponse>;
evalCount?: number;
evalDuration?: number;
promptEvalCount?: number;
promptEvalDuration?: number;
options?: ChatOptions;
tools?: Record<string, any>;
timers?: Record<string, number>;
}
export interface ChatOptions {
seed?: number;
numCtx?: number;
temperature?: number;
}
export interface ChatQuery {
prompt: string;
tunables?: Tunables;
agentOptions?: Record<string, any>;
} }
export interface ChatSession { export interface ChatSession {
@ -269,51 +280,64 @@ export interface ChatSession {
createdAt?: Date; createdAt?: Date;
lastActivity?: Date; lastActivity?: Date;
title?: string; title?: string;
context?: ChatContext; context: ChatContext;
messages?: Array<ChatMessage>; messages?: Array<ChatMessage>;
isArchived?: boolean; isArchived?: boolean;
systemPrompt?: string; systemPrompt?: string;
} }
export interface ChromaDBGetResponse {
ids?: Array<string>;
embeddings?: Array<Array<number>>;
documents?: Array<string>;
metadatas?: Array<Record<string, any>>;
name?: string;
size?: number;
query?: string;
queryEmbedding?: Array<number>;
umapEmbedding2D?: Array<number>;
umapEmbedding3D?: Array<number>;
}
export interface CustomQuestion { export interface CustomQuestion {
question?: string; question: string;
answer?: string; answer: string;
} }
export interface DataSourceConfiguration { export interface DataSourceConfiguration {
id?: string; id?: string;
ragConfigId?: string; ragConfigId: string;
name?: string; name: string;
sourceType?: "document" | "website" | "api" | "database" | "internal"; sourceType: "document" | "website" | "api" | "database" | "internal";
connectionDetails?: Record<string, any>; connectionDetails: Record<string, any>;
processingPipeline?: Array<ProcessingStep>; processingPipeline: Array<ProcessingStep>;
refreshSchedule?: string; refreshSchedule?: string;
lastRefreshed?: Date; lastRefreshed?: Date;
status?: "active" | "pending" | "error" | "processing"; status: "active" | "pending" | "error" | "processing";
errorDetails?: string; errorDetails?: string;
metadata?: Record<string, any>; metadata?: Record<string, any>;
} }
export interface DesiredSalary { export interface DesiredSalary {
amount?: number; amount: number;
currency?: string; currency: string;
period?: "hour" | "day" | "month" | "year"; period: "hour" | "day" | "month" | "year";
} }
export interface EditHistory { export interface EditHistory {
content?: string; content: string;
editedAt?: Date; editedAt: Date;
editedBy?: string; editedBy: string;
} }
export interface Education { export interface Education {
id?: string; id?: string;
institution?: string; institution: string;
degree?: string; degree: string;
fieldOfStudy?: string; fieldOfStudy: string;
startDate?: Date; startDate: Date;
endDate?: Date; endDate?: Date;
isCurrent?: boolean; isCurrent: boolean;
gpa?: number; gpa?: number;
achievements?: Array<string>; achievements?: Array<string>;
location?: Location; location?: Location;
@ -321,45 +345,45 @@ export interface Education {
export interface Employer { export interface Employer {
id?: string; id?: string;
email?: string; email: string;
phone?: string; phone?: string;
createdAt?: Date; createdAt: Date;
updatedAt?: Date; updatedAt: Date;
lastLogin?: Date; lastLogin?: Date;
profileImage?: string; profileImage?: string;
status?: "active" | "inactive" | "pending" | "banned"; status: "active" | "inactive" | "pending" | "banned";
userType?: "employer"; userType?: "employer";
companyName?: string; companyName: string;
industry?: string; industry: string;
description?: string; description?: string;
companySize?: string; companySize: string;
companyDescription?: string; companyDescription: string;
websiteUrl?: string; websiteUrl?: string;
jobs?: Array<Job>; jobs?: Array<Job>;
location?: Location; location: Location;
companyLogo?: string; companyLogo?: string;
socialLinks?: Array<SocialLink>; socialLinks?: Array<SocialLink>;
poc?: PointOfContact; poc?: PointOfContact;
} }
export interface EmployerResponse { export interface EmployerResponse {
success?: boolean; success: boolean;
data?: Employer; data?: Employer;
error?: ErrorDetail; error?: ErrorDetail;
meta?: Record<string, any>; meta?: Record<string, any>;
} }
export interface ErrorDetail { export interface ErrorDetail {
code?: string; code: string;
message?: string; message: string;
details?: any; details?: any;
} }
export interface Guest { export interface Guest {
id?: string; id?: string;
sessionId?: string; sessionId: string;
createdAt?: Date; createdAt: Date;
lastActivity?: Date; lastActivity: Date;
convertedToUserId?: string; convertedToUserId?: string;
ipAddress?: string; ipAddress?: string;
userAgent?: string; userAgent?: string;
@ -367,49 +391,49 @@ export interface Guest {
export interface InterviewFeedback { export interface InterviewFeedback {
id?: string; id?: string;
interviewId?: string; interviewId: string;
reviewerId?: string; reviewerId: string;
technicalScore?: number; technicalScore: number;
culturalScore?: number; culturalScore: number;
overallScore?: number; overallScore: number;
strengths?: Array<string>; strengths: Array<string>;
weaknesses?: Array<string>; weaknesses: Array<string>;
recommendation?: "strong_hire" | "hire" | "no_hire" | "strong_no_hire"; recommendation: "strong_hire" | "hire" | "no_hire" | "strong_no_hire";
comments?: string; comments: string;
createdAt?: Date; createdAt: Date;
updatedAt?: Date; updatedAt: Date;
isVisible?: boolean; isVisible: boolean;
skillAssessments?: Array<SkillAssessment>; skillAssessments?: Array<SkillAssessment>;
} }
export interface InterviewSchedule { export interface InterviewSchedule {
id?: string; id?: string;
applicationId?: string; applicationId: string;
scheduledDate?: Date; scheduledDate: Date;
endDate?: Date; endDate: Date;
interviewType?: "phone" | "video" | "onsite" | "technical" | "behavioral"; interviewType: "phone" | "video" | "onsite" | "technical" | "behavioral";
interviewers?: Array<string>; interviewers: Array<string>;
location?: string | Location; location?: string | Location;
notes?: string; notes?: string;
feedback?: InterviewFeedback; feedback?: InterviewFeedback;
status?: "scheduled" | "completed" | "cancelled" | "rescheduled"; status: "scheduled" | "completed" | "cancelled" | "rescheduled";
meetingLink?: string; meetingLink?: string;
} }
export interface Job { export interface Job {
id?: string; id?: string;
title?: string; title: string;
description?: string; description: string;
responsibilities?: Array<string>; responsibilities: Array<string>;
requirements?: Array<string>; requirements: Array<string>;
preferredSkills?: Array<string>; preferredSkills?: Array<string>;
employerId?: string; employerId: string;
location?: Location; location: Location;
salaryRange?: SalaryRange; salaryRange?: SalaryRange;
employmentType?: "full-time" | "part-time" | "contract" | "internship" | "freelance"; employmentType: "full-time" | "part-time" | "contract" | "internship" | "freelance";
datePosted?: Date; datePosted: Date;
applicationDeadline?: Date; applicationDeadline?: Date;
isActive?: boolean; isActive: boolean;
applicants?: Array<JobApplication>; applicants?: Array<JobApplication>;
department?: string; department?: string;
reportsTo?: string; reportsTo?: string;
@ -422,12 +446,12 @@ export interface Job {
export interface JobApplication { export interface JobApplication {
id?: string; id?: string;
jobId?: string; jobId: string;
candidateId?: string; candidateId: string;
status?: "applied" | "reviewing" | "interview" | "offer" | "rejected" | "accepted" | "withdrawn"; status: "applied" | "reviewing" | "interview" | "offer" | "rejected" | "accepted" | "withdrawn";
appliedDate?: Date; appliedDate: Date;
updatedDate?: Date; updatedDate: Date;
resumeVersion?: string; resumeVersion: string;
coverLetter?: string; coverLetter?: string;
notes?: string; notes?: string;
interviewSchedules?: Array<InterviewSchedule>; interviewSchedules?: Array<InterviewSchedule>;
@ -437,28 +461,28 @@ export interface JobApplication {
} }
export interface JobListResponse { export interface JobListResponse {
success?: boolean; success: boolean;
data?: Array<Job>; data?: Array<Job>;
error?: ErrorDetail; error?: ErrorDetail;
meta?: Record<string, any>; meta?: Record<string, any>;
} }
export interface JobResponse { export interface JobResponse {
success?: boolean; success: boolean;
data?: Job; data?: Job;
error?: ErrorDetail; error?: ErrorDetail;
meta?: Record<string, any>; meta?: Record<string, any>;
} }
export interface Language { export interface Language {
language?: string; language: string;
proficiency?: "basic" | "conversational" | "fluent" | "native"; proficiency: "basic" | "conversational" | "fluent" | "native";
} }
export interface Location { export interface Location {
city?: string; city: string;
state?: string; state?: string;
country?: string; country: string;
postalCode?: string; postalCode?: string;
latitude?: number; latitude?: number;
longitude?: number; longitude?: number;
@ -468,15 +492,15 @@ export interface Location {
} }
export interface MessageReaction { export interface MessageReaction {
userId?: string; userId: string;
reaction?: string; reaction: string;
timestamp?: Date; timestamp: Date;
} }
export interface NotificationPreference { export interface NotificationPreference {
type?: "email" | "push" | "in_app"; type: "email" | "push" | "in_app";
events?: Array<string>; events: Array<string>;
isEnabled?: boolean; isEnabled: boolean;
} }
export interface PaginatedRequest { export interface PaginatedRequest {
@ -488,80 +512,79 @@ export interface PaginatedRequest {
} }
export interface PaginatedResponse { export interface PaginatedResponse {
data?: Array<any>; data: Array<any>;
total?: number; total: number;
page?: number; page: number;
limit?: number; limit: number;
totalPages?: number; totalPages: number;
hasMore?: boolean; hasMore: boolean;
} }
export interface PointOfContact { export interface PointOfContact {
name?: string; name: string;
position?: string; position: string;
email?: string; email: string;
phone?: string; phone?: string;
} }
export interface ProcessingStep { export interface ProcessingStep {
id?: string; id?: string;
type?: "extract" | "transform" | "chunk" | "embed" | "filter" | "summarize"; type: "extract" | "transform" | "chunk" | "embed" | "filter" | "summarize";
parameters?: Record<string, any>; parameters: Record<string, any>;
order?: number; order: number;
dependsOn?: Array<string>; dependsOn?: Array<string>;
} }
export interface Query {
prompt?: string;
tunables?: Tunables;
agentOptions?: Record<string, any>;
}
export interface RAGConfiguration { export interface RAGConfiguration {
id?: string; id?: string;
userId?: string; userId: string;
name?: string; name: string;
description?: string; description?: string;
dataSourceConfigurations?: Array<DataSourceConfiguration>; dataSourceConfigurations: Array<DataSourceConfiguration>;
embeddingModel?: string; embeddingModel: string;
vectorStoreType?: "pinecone" | "qdrant" | "faiss" | "milvus" | "weaviate"; vectorStoreType: "pinecone" | "qdrant" | "faiss" | "milvus" | "weaviate";
retrievalParameters?: RetrievalParameters; retrievalParameters: RetrievalParameters;
createdAt?: Date; createdAt: Date;
updatedAt?: Date; updatedAt: Date;
isDefault?: boolean; version: number;
version?: number; isActive: boolean;
isActive?: boolean; }
export interface RagEntry {
name: string;
description?: string;
enabled?: boolean;
} }
export interface RefreshToken { export interface RefreshToken {
token?: string; token: string;
expiresAt?: Date; expiresAt: Date;
device?: string; device: string;
ipAddress?: string; ipAddress: string;
isRevoked?: boolean; isRevoked: boolean;
revokedReason?: string; revokedReason?: string;
} }
export interface RetrievalParameters { export interface RetrievalParameters {
searchType?: "similarity" | "mmr" | "hybrid" | "keyword"; searchType: "similarity" | "mmr" | "hybrid" | "keyword";
topK?: number; topK: number;
similarityThreshold?: number; similarityThreshold?: number;
rerankerModel?: string; rerankerModel?: string;
useKeywordBoost?: boolean; useKeywordBoost: boolean;
filterOptions?: Record<string, any>; filterOptions?: Record<string, any>;
contextWindow?: number; contextWindow: number;
} }
export interface SalaryRange { export interface SalaryRange {
min?: number; min: number;
max?: number; max: number;
currency?: string; currency: string;
period?: "hour" | "day" | "month" | "year"; period: "hour" | "day" | "month" | "year";
isVisible?: boolean; isVisible: boolean;
} }
export interface SearchQuery { export interface SearchQuery {
query?: string; query: string;
filters?: Record<string, any>; filters?: Record<string, any>;
page?: number; page?: number;
limit?: number; limit?: number;
@ -571,21 +594,21 @@ export interface SearchQuery {
export interface Skill { export interface Skill {
id?: string; id?: string;
name?: string; name: string;
category?: string; category: string;
level?: "beginner" | "intermediate" | "advanced" | "expert"; level: "beginner" | "intermediate" | "advanced" | "expert";
yearsOfExperience?: number; yearsOfExperience?: number;
} }
export interface SkillAssessment { export interface SkillAssessment {
skillName?: string; skillName: string;
score?: number; score: number;
comments?: string; comments?: string;
} }
export interface SocialLink { export interface SocialLink {
platform?: "linkedin" | "twitter" | "github" | "dribbble" | "behance" | "website" | "other"; platform: "linkedin" | "twitter" | "github" | "dribbble" | "behance" | "website" | "other";
url?: string; url: string;
} }
export interface Tunables { export interface Tunables {
@ -598,35 +621,35 @@ export interface UserActivity {
id?: string; id?: string;
userId?: string; userId?: string;
guestId?: string; guestId?: string;
activityType?: "login" | "search" | "view_job" | "apply_job" | "message" | "update_profile" | "chat"; activityType: "login" | "search" | "view_job" | "apply_job" | "message" | "update_profile" | "chat";
timestamp?: Date; timestamp: Date;
metadata?: Record<string, any>; metadata: Record<string, any>;
ipAddress?: string; ipAddress?: string;
userAgent?: string; userAgent?: string;
sessionId?: string; sessionId?: string;
} }
export interface UserPreference { export interface UserPreference {
userId?: string; userId: string;
theme?: "light" | "dark" | "system"; theme: "light" | "dark" | "system";
notifications?: Array<NotificationPreference>; notifications: Array<NotificationPreference>;
accessibility?: AccessibilitySettings; accessibility: AccessibilitySettings;
dashboardLayout?: Record<string, any>; dashboardLayout?: Record<string, any>;
language?: string; language: string;
timezone?: string; timezone: string;
emailFrequency?: "immediate" | "daily" | "weekly" | "never"; emailFrequency: "immediate" | "daily" | "weekly" | "never";
} }
export interface WorkExperience { export interface WorkExperience {
id?: string; id?: string;
companyName?: string; companyName: string;
position?: string; position: string;
startDate?: Date; startDate: Date;
endDate?: Date; endDate?: Date;
isCurrent?: boolean; isCurrent: boolean;
description?: string; description: string;
skills?: Array<string>; skills: Array<string>;
location?: Location; location: Location;
achievements?: Array<string>; achievements?: Array<string>;
} }

View File

@ -0,0 +1,97 @@
from __future__ import annotations
from pydantic import BaseModel, Field # type: ignore
from typing import (
Literal,
get_args,
List,
AsyncGenerator,
TYPE_CHECKING,
Optional,
ClassVar,
Any,
TypeAlias,
Dict,
Tuple,
)
import importlib
import pathlib
import inspect
from prometheus_client import CollectorRegistry # type: ignore
from database import RedisDatabase
from . base import Agent
from logger import logger
# Module-level cache of instantiated agents; at most one instance per agent_type.
_agents: List[Agent] = []


def get_or_create_agent(agent_type: str, prometheus_collector: CollectorRegistry, database: RedisDatabase, **kwargs) -> Agent:
    """
    Get or create and append a new agent of the specified type, ensuring only one agent per type exists.

    Args:
        agent_type: The type of agent to create (e.g., 'web', 'database').
        prometheus_collector: Prometheus registry handed to the new agent for metrics.
        database: Redis database handle handed to the new agent.
        **kwargs: Additional fields required by the specific agent subclass.

    Returns:
        The existing agent for this type if one was already created, otherwise
        a newly constructed (and cached) instance.

    Raises:
        ValueError: If no Agent subclass declares a matching agent_type.
    """
    # Check if a agent with the given agent_type already exists
    for agent in _agents:
        if agent.agent_type == agent_type:
            return agent

    # Find the matching subclass
    # NOTE(review): only direct subclasses of Agent are inspected here;
    # grandchild subclasses would not be found — confirm this is intentional.
    for agent_cls in Agent.__subclasses__():
        if agent_cls.model_fields["agent_type"].default == agent_type:
            # Create the agent instance with provided kwargs
            agent = agent_cls(agent_type=agent_type, prometheus_collector=prometheus_collector, database=database, **kwargs)
            # if agent.agent_persist: # If an agent is not set to persist, do not add it to the list
            _agents.append(agent)
            return agent

    raise ValueError(f"No agent class found for agent_type: {agent_type}")
# Type alias for Agent or any subclass
AnyAgent: TypeAlias = Agent  # BaseModel covers Agent and subclasses

# Maps class_name to (module_name, class_name)
class_registry: Dict[str, Tuple[str, str]] = (
    {}
)

__all__ = ['get_or_create_agent']

# Dynamically import every sibling module in this package (skipping
# __init__.py, base.py, and private modules) and re-export any Agent
# subclasses they define at package level.
package_dir = pathlib.Path(__file__).parent
package_name = __name__

for path in package_dir.glob("*.py"):
    if path.name in ("__init__.py", "base.py") or path.name.startswith("_"):
        continue

    module_name = path.stem
    full_module_name = f"{package_name}.{module_name}"
    try:
        module = importlib.import_module(full_module_name)
        # Find all Agent subclasses in the module
        for name, obj in inspect.getmembers(module, inspect.isclass):
            if (
                issubclass(obj, AnyAgent)
                and obj is not AnyAgent
                and obj is not Agent
                and name not in class_registry
            ):
                class_registry[name] = (full_module_name, name)
                # Expose the class as an attribute of this package.
                globals()[name] = obj
                logger.info(f"Adding agent: {name}")
                __all__.append(name)  # type: ignore
    except ImportError as e:
        logger.error(f"Error importing {full_module_name}: {e}")
        raise e
    except Exception as e:
        logger.error(f"Error processing {full_module_name}: {e}")
        raise e

605
src/backend/agents/base.py Normal file
View File

@ -0,0 +1,605 @@
from __future__ import annotations
from pydantic import BaseModel, Field, model_validator # type: ignore
from typing import (
Literal,
get_args,
List,
AsyncGenerator,
TYPE_CHECKING,
Optional,
ClassVar,
Any,
TypeAlias,
Dict,
Tuple,
)
import json
import time
import inspect
from abc import ABC
import asyncio
from datetime import datetime, UTC
from prometheus_client import Counter, Summary, CollectorRegistry # type: ignore
from models import ( ChatQuery, ChatMessage, Tunables, ChatStatusType, ChatMessageMetaData)
from logger import logger
import defines
from .registry import agent_registry
from metrics import Metrics
from database import RedisDatabase # type: ignore
class LLMMessage(BaseModel):
    """A single message in the shape expected by the underlying LLM client.

    `tool_calls` is excluded from serialization; it is only populated when the
    LLM requested tool invocations.
    """

    role: str = Field(default="")
    content: str = Field(default="")
    # Fix: the previous default was an empty dict ({}), which does not match
    # the declared List element type and is a shared mutable default.
    # None is the correct "no tool calls" sentinel for an Optional list.
    tool_calls: Optional[List[Dict]] = Field(default=None, exclude=True)
class Agent(BaseModel, ABC):
    """
    Base class for all agent types.

    This class defines the common attributes and methods for all agent types:
    per-agent Prometheus metrics, a class-wide shared LLM context size,
    per-agent conversation history, and the streaming `generate` pipeline.
    """

    class Config:
        arbitrary_types_allowed = True  # Allow arbitrary types like RedisDatabase

    # Agent management with pydantic
    agent_type: Literal["base"] = "base"
    _agent_type: ClassVar[str] = agent_type  # Add this for registration
    agent_persist: bool = True  # Whether this agent will persist in the database

    database: RedisDatabase = Field(
        ...,
        description="Database connection for this agent, used to store and retrieve data."
    )
    prometheus_collector: CollectorRegistry = Field(..., description="Prometheus collector for this agent, used to track metrics.", exclude=True)

    # Tunables (sets default for new Messages attached to this agent)
    tunables: Tunables = Field(default_factory=Tunables)

    # NOTE(review): declared non-Optional yet defaulted to None; the
    # model_validator below fills it in after construction — confirm pydantic
    # accepts None here under the project's validation settings.
    metrics: Metrics = Field(
        None, description="Metrics collector for this agent, used to track performance and usage."
    )

    @model_validator(mode="after")
    def initialize_metrics(self) -> "Agent":
        # Build the Metrics wrapper lazily, once the collector is available.
        if self.metrics is None:
            self.metrics = Metrics(prometheus_collector=self.prometheus_collector)
        return self

    # Agent properties
    system_prompt: str  # Mandatory
    context_tokens: int = 0

    # context_size is shared across all subclasses
    _context_size: ClassVar[int] = int(defines.max_context * 0.5)

    conversation: List[ChatMessage] = Field(
        default_factory=list,
        description="Conversation history for this agent, used to maintain context across messages."
    )

    @property
    def context_size(self) -> int:
        # Reads the class-wide (shared) context size.
        return Agent._context_size

    @context_size.setter
    def context_size(self, value: int):
        # Writes the class-wide context size; this affects ALL agent instances.
        Agent._context_size = value

    def set_optimal_context_size(
        self, llm: Any, model: str, prompt: str, ctx_buffer=2048
    ) -> int:
        """Estimate the token budget needed for `prompt` and grow the shared
        context size if the estimate (plus `ctx_buffer`) exceeds it.

        Returns the (possibly enlarged) shared context size.
        """
        # Most models average 1.3-1.5 tokens per word
        word_count = len(prompt.split())
        tokens = int(word_count * 1.4)

        # Add buffer for safety
        total_ctx = tokens + ctx_buffer

        if total_ctx > self.context_size:
            logger.info(
                f"Increasing context size from {self.context_size} to {total_ctx}"
            )

        # Grow the context size if necessary
        self.context_size = max(self.context_size, total_ctx)

        # Use actual model maximum context size
        return self.context_size

    # Class and pydantic model management
    def __init_subclass__(cls, **kwargs) -> None:
        """Auto-register subclasses"""
        super().__init_subclass__(**kwargs)
        # Register this class if it has an agent_type
        if hasattr(cls, "agent_type") and cls.agent_type != Agent._agent_type:
            agent_registry.register(cls.agent_type, cls)

    def model_dump(self, *args, **kwargs) -> Any:
        # Ensure context is always excluded, even with exclude_unset=True
        # NOTE(review): no "context" field is declared on this model; this
        # exclusion looks like a leftover from an earlier schema — confirm.
        kwargs.setdefault("exclude", set())
        if isinstance(kwargs["exclude"], set):
            kwargs["exclude"].add("context")
        elif isinstance(kwargs["exclude"], dict):
            kwargs["exclude"]["context"] = True
        return super().model_dump(*args, **kwargs)

    @classmethod
    def valid_agent_types(cls) -> set[str]:
        """Return the set of valid agent_type values."""
        return set(get_args(cls.__annotations__["agent_type"]))

    # Agent methods
    def get_agent_type(self):
        # Returns the class-level registration type, not the instance field.
        return self._agent_type

    # NOTE: the legacy `prepare_message` (RAG preamble construction) and
    # `process_tool_calls` (tool dispatch + streaming follow-up) overrides that
    # previously lived here as commented-out code are preserved in version
    # control history while the agent pipeline is being reworked.

    def collect_metrics(self, response):
        # Accumulate prompt/eval token counters from a completed LLM response.
        self.metrics.tokens_prompt.labels(agent=self.agent_type).inc(
            response.prompt_eval_count
        )
        self.metrics.tokens_eval.labels(agent=self.agent_type).inc(response.eval_count)

    async def generate(
        self, llm: Any, model: str, query: ChatQuery, session_id: str, user_id: str, temperature=0.7
    ) -> AsyncGenerator[ChatMessage, None]:
        """Stream an LLM response to `query`.

        Yields ChatMessage snapshots as the status transitions
        (PREPARING -> THINKING -> STREAMING -> DONE) and as content chunks
        arrive; the completed message is appended to `self.conversation`.
        """
        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")

        chat_message = ChatMessage(
            session_id=session_id,
            prompt=query.prompt,
            tunables=query.tunables,
            status=ChatStatusType.PREPARING,
            sender="user",
            content="",
            timestamp=datetime.now(UTC)
        )

        self.metrics.generate_count.labels(agent=self.agent_type).inc()
        with self.metrics.generate_duration.labels(agent=self.agent_type).time():
            # Create a pruned down message list based purely on the prompt and responses,
            # discarding the full preamble generated by prepare_message
            messages: List[LLMMessage] = [
                LLMMessage(role="system", content=self.system_prompt)
            ]
            messages.extend(
                [
                    item
                    for m in self.conversation
                    for item in [
                        LLMMessage(role="user", content=m.prompt.strip() if m.prompt else ""),
                        LLMMessage(role="assistant", content=m.response.strip()),
                    ]
                ]
            )

            # Only the actual user query is provided with the full context message
            messages.append(
                LLMMessage(role="user", content=query.prompt.strip())
            )

            # message.messages = messages
            chat_message.metadata = ChatMessageMetaData()
            chat_message.metadata.options = {
                "seed": 8911,
                "num_ctx": self.context_size,
                "temperature": temperature,  # Higher temperature to encourage tool usage
            }

            # Create a dict for storing various timing stats
            chat_message.metadata.timers = {}

            # NOTE: the legacy two-pass tool-probe / tool-call branch that used
            # to run here (commented out) is preserved in version control
            # history while tool support is re-integrated.

            # not use_tools
            chat_message.status = ChatStatusType.THINKING
            chat_message.content = f"Generating response..."
            yield chat_message

            # Reset the response for streaming
            chat_message.content = ""

            start_time = time.perf_counter()
            # Stream the LLM response, yielding one snapshot per chunk.
            for response in llm.chat(
                model=model,
                messages=messages,
                options={
                    **chat_message.metadata.options,
                },
                stream=True,
            ):
                if not response:
                    chat_message.status = ChatStatusType.ERROR
                    chat_message.content = "No response from LLM."
                    yield chat_message
                    return

                chat_message.status = ChatStatusType.STREAMING
                chat_message.chunk = response.message.content
                chat_message.content += chat_message.chunk

                if not response.done:
                    yield chat_message

                if response.done:
                    # Final chunk: record token metrics and mark completion.
                    self.collect_metrics(response)
                    chat_message.metadata.eval_count += response.eval_count
                    chat_message.metadata.eval_duration += response.eval_duration
                    chat_message.metadata.prompt_eval_count += response.prompt_eval_count
                    chat_message.metadata.prompt_eval_duration += response.prompt_eval_duration
                    self.context_tokens = (
                        response.prompt_eval_count + response.eval_count
                    )
                    chat_message.status = ChatStatusType.DONE
                    yield chat_message

            end_time = time.perf_counter()
            chat_message.metadata.timers["streamed"] = end_time - start_time

            chat_message.status = ChatStatusType.DONE
            # Persist the completed exchange in this agent's in-memory history.
            self.conversation.append(chat_message)
            return

    # NOTE: the legacy `process_message` implementation (busy-wait queueing,
    # preamble/context assembly, and delegation to generate_llm_response) that
    # previously lived here as commented-out code is preserved in version
    # control history.
# Register the base agent so the registry can resolve the "base" agent_type.
agent_registry.register(Agent._agent_type, Agent)

View File

@ -0,0 +1,88 @@
from __future__ import annotations
from typing import Literal, AsyncGenerator, ClassVar, Optional, Any
from datetime import datetime
import inspect
from .base import Agent, agent_registry
from logger import logger
from .registry import agent_registry
from models import ( ChatQuery, ChatMessage, Tunables, ChatStatusType)
# System prompt for the general-purpose chat agent. This is a runtime string
# sent to the LLM — wording changes alter model behavior, so edit with care.
# The timestamp is baked in at import time via the f-string below.
system_message = f"""
Launched on {datetime.now().isoformat()}.
When answering queries, follow these steps:
- First analyze the query to determine if real-time information from the tools might be helpful
- Even when <|context|> or <|resume|> is provided, consider whether the tools would provide more current or comprehensive information
- Use the provided tools whenever they would enhance your response, regardless of whether context is also available
- When presenting weather forecasts, include relevant emojis immediately before the corresponding text. For example, for a sunny day, say \"☀️ Sunny\" or if the forecast says there will be \"rain showers, say \"🌧️ Rain showers\". Use this mapping for weather emojis: Sunny: ☀️, Cloudy: ☁️, Rainy: 🌧️, Snowy: ❄️
- When any combination of <|context|>, <|resume|> and tool outputs are relevant, synthesize information from all sources to provide the most complete answer
- Always prioritize the most up-to-date and relevant information, whether it comes from <|context|>, <|resume|> or tools
- If <|context|> and tool outputs contain conflicting information, prefer the tool outputs as they likely represent more current data
- If there is information in the <|context|> or <|resume|> sections to enhance the answer, incorporate it seamlessly and refer to it as 'the latest information' or 'recent data' instead of mentioning '<|context|>' (etc.) or quoting it directly.
- Avoid phrases like 'According to the <|context|>' or similar references to the <|context|> or <|resume|>.
CRITICAL INSTRUCTIONS FOR IMAGE GENERATION:
1. When the user requests to generate an image, inject the following into the response: <GenerateImage prompt="USER-PROMPT"/>. Do this when users request images, drawings, or visual content.
3. MANDATORY: You must respond with EXACTLY this format: <GenerateImage prompt="{{USER-PROMPT}}"/>
4. FORBIDDEN: DO NOT use markdown image syntax ![](url)
5. FORBIDDEN: DO NOT create fake URLs or file paths
6. FORBIDDEN: DO NOT use any other image embedding format
CORRECT EXAMPLE:
User: "Draw a cat"
Your response: "<GenerateImage prompt='Draw a cat'/>"
WRONG EXAMPLES (DO NOT DO THIS):
- ![](https://example.com/...)
- ![Cat image](any_url)
- <img src="...">
The <GenerateImage prompt="{{USER-PROMPT}}"/> format is the ONLY way to display images in this system.
DO NOT make up a URL for an image or provide markdown syntax for embedding an image. Only use <GenerateImage prompt="{{USER-PROMPT}}".
Always use tools, <|resume|>, and <|context|> when possible. Be concise, and never make up information. If you do not know the answer, say so.
"""
class Chat(Agent):
    """
    Chat Agent

    General-purpose conversational agent. It inherits the full streaming
    `generate` pipeline from Agent and only overrides the system prompt and
    its registered agent_type ("general").
    """

    agent_type: Literal["general"] = "general"  # type: ignore
    _agent_type: ClassVar[str] = agent_type  # Add this for registration
    system_prompt: str = system_message

    # NOTE: the legacy `prepare_message` override (preamble "rules"/"question"
    # construction) that previously lived here as commented-out code is
    # preserved in version control history.
# Register the Chat agent (previous comment incorrectly said "base agent").
agent_registry.register(Chat._agent_type, Chat)

View File

@ -0,0 +1,33 @@
from __future__ import annotations
from typing import List, Dict, Optional, Type
# We'll use a registry pattern rather than hardcoded strings
class AgentRegistry:
    """Class-level mapping from agent-type string to implementing class.

    All state lives on the class itself, so the module-level singleton below
    and direct classmethod calls observe the same registry.
    """

    _registry: Dict[str, Type] = {}

    @classmethod
    def register(cls, agent_type: str, agent_class: Type) -> Type:
        """Record `agent_class` under `agent_type`.

        Returns the class unchanged so the call can double as a decorator.
        """
        cls._registry[agent_type] = agent_class
        return agent_class

    @classmethod
    def get_class(cls, agent_type: str) -> Optional[Type]:
        """Resolve `agent_type` to its registered class, or None if unknown."""
        return cls._registry.get(agent_type)

    @classmethod
    def get_types(cls) -> List[str]:
        """All agent-type strings currently registered."""
        return [*cls._registry]

    @classmethod
    def get_classes(cls) -> Dict[str, Type]:
        """A defensive copy of the full type-to-class mapping."""
        return dict(cls._registry)


# Create a singleton instance
agent_registry = AgentRegistry()

View File

@ -8,7 +8,7 @@ import sys
from datetime import datetime from datetime import datetime
from models import ( from models import (
UserStatus, UserType, SkillLevel, EmploymentType, UserStatus, UserType, SkillLevel, EmploymentType,
Candidate, Employer, Location, Skill, AIParameters, AIModelType Candidate, Employer, Location, Skill, AIModelType
) )
@ -119,41 +119,23 @@ def test_api_dict_format():
def test_validation_constraints(): def test_validation_constraints():
"""Test that validation constraints work""" """Test that validation constraints work"""
print("\n🔒 Testing validation constraints...") print("\n🔒 Testing validation constraints...")
# Test AI Parameters with constraints
valid_params = AIParameters(
name="Test Config",
model=AIModelType.QWEN2_5,
temperature=0.7, # Valid: 0-1
maxTokens=2000, # Valid: > 0
topP=0.95, # Valid: 0-1
frequencyPenalty=0.0, # Valid: -2 to 2
presencePenalty=0.0, # Valid: -2 to 2
isDefault=True,
createdAt=datetime.now(),
updatedAt=datetime.now()
)
print(f"✅ Valid AI parameters created")
# Test constraint violation
try: try:
invalid_params = AIParameters( # Create a candidate with invalid email
name="Invalid Config", invalid_candidate = Candidate(
model=AIModelType.QWEN2_5, email="invalid-email",
temperature=1.5, # Invalid: > 1 username="test_invalid",
maxTokens=2000,
topP=0.95,
frequencyPenalty=0.0,
presencePenalty=0.0,
isDefault=True,
createdAt=datetime.now(), createdAt=datetime.now(),
updatedAt=datetime.now() updatedAt=datetime.now(),
status=UserStatus.ACTIVE,
firstName="Jane",
lastName="Doe",
fullName="Jane Doe"
) )
print("❌ Should have rejected invalid temperature") print("❌ Validation should have failed but didn't")
return False return False
except Exception: except ValueError as e:
print(f"✅ Constraint validation working") print(f"✅ Validation error caught: {e}")
return True return True
def test_enum_values(): def test_enum_values():
@ -201,6 +183,7 @@ def main():
print(f"\n❌ Test failed: {type(e).__name__}: {e}") print(f"\n❌ Test failed: {type(e).__name__}: {e}")
import traceback import traceback
traceback.print_exc() traceback.print_exc()
print(f"\n{traceback.format_exc()}")
return False return False
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -195,48 +195,107 @@ def snake_to_camel(snake_str: str) -> str:
components = snake_str.split('_') components = snake_str.split('_')
return components[0] + ''.join(x.title() for x in components[1:]) return components[0] + ''.join(x.title() for x in components[1:])
def is_field_optional(field_info: Any, field_type: Any) -> bool: def is_field_optional(field_info: Any, field_type: Any, debug: bool = False) -> bool:
"""Determine if a field should be optional in TypeScript""" """Determine if a field should be optional in TypeScript"""
if debug:
print(f" 🔍 Analyzing field optionality:")
# First, check if the type itself is Optional (Union with None) # First, check if the type itself is Optional (Union with None)
origin = get_origin(field_type) origin = get_origin(field_type)
args = get_args(field_type) args = get_args(field_type)
is_union_with_none = origin is Union and type(None) in args is_union_with_none = origin is Union and type(None) in args
if debug:
print(f" └─ Type is Optional[T]: {is_union_with_none}")
# If the type is Optional[T], it's always optional regardless of Field settings # If the type is Optional[T], it's always optional regardless of Field settings
if is_union_with_none: if is_union_with_none:
if debug:
print(f" └─ RESULT: Optional (type is Optional[T])")
return True return True
# For non-Optional types, check Field settings and defaults # For non-Optional types, check Field settings and defaults
# Check for default factory (makes field optional) # Check for default factory (makes field optional)
if hasattr(field_info, 'default_factory') and field_info.default_factory is not None: has_default_factory = hasattr(field_info, 'default_factory') and field_info.default_factory is not None
if debug:
print(f" └─ Has default factory: {has_default_factory}")
if has_default_factory:
if debug:
print(f" └─ RESULT: Optional (has default factory)")
return True return True
# Check the default value # Check the default value - this is the tricky part
if hasattr(field_info, 'default'): if hasattr(field_info, 'default'):
default_val = field_info.default default_val = field_info.default
if debug:
print(f" └─ Has default attribute: {repr(default_val)} (type: {type(default_val)})")
# Field(...) or Ellipsis means REQUIRED (not optional) # Check for different types of "no default" markers
if default_val is ...: # Pydantic uses various markers for "no default"
if default_val is ...: # Ellipsis
if debug:
print(f" └─ RESULT: Required (default is Ellipsis)")
return False
# Check for Pydantic's internal "PydanticUndefined" or similar markers
default_str = str(default_val)
default_type_str = str(type(default_val))
# Common patterns for "undefined" in Pydantic
undefined_patterns = [
'PydanticUndefined',
'Undefined',
'_Unset',
'UNSET',
'NotSet',
'_MISSING'
]
is_undefined_marker = any(pattern in default_str or pattern in default_type_str
for pattern in undefined_patterns)
if debug:
print(f" └─ Checking for undefined markers in: {default_str} | {default_type_str}")
print(f" └─ Is undefined marker: {is_undefined_marker}")
if is_undefined_marker:
if debug:
print(f" └─ RESULT: Required (default is undefined marker)")
return False return False
# Any other default value (including None) makes it optional # Any other actual default value makes it optional
# This covers: Field(None), Field("some_value"), = "some_value", = None, etc. if debug:
else: print(f" └─ RESULT: Optional (has actual default value)")
return True return True
else:
if debug:
print(f" └─ No default attribute found")
# If no default attribute exists, check Pydantic's required flag
if hasattr(field_info, 'is_required'):
try:
is_required = field_info.is_required()
if debug:
print(f" └─ is_required(): {is_required}")
return not is_required
except:
if debug:
print(f" └─ is_required() failed")
pass
# Check the 'required' attribute (Pydantic v1 style)
if hasattr(field_info, 'required'):
is_required = field_info.required
if debug:
print(f" └─ required attribute: {is_required}")
return not is_required
# If no default is set at all, check if field is explicitly marked as not required # Default: if type is not Optional and no clear default, it's required (not optional)
# This is for edge cases in Pydantic v2 if debug:
if hasattr(field_info, 'is_required'): print(f" └─ RESULT: Required (fallback - no Optional type, no default)")
try:
return not field_info.is_required()
except:
pass
elif hasattr(field_info, 'required'):
return not field_info.required
# Default: if type is not Optional and no explicit default, it's required (not optional)
return False return False
def process_pydantic_model(model_class, debug: bool = False) -> Dict[str, Any]: def process_pydantic_model(model_class, debug: bool = False) -> Dict[str, Any]:
@ -271,27 +330,12 @@ def process_pydantic_model(model_class, debug: bool = False) -> Dict[str, Any]:
ts_type = python_type_to_typescript(field_type, debug) ts_type = python_type_to_typescript(field_type, debug)
# Check if optional # Check if optional
is_optional = is_field_optional(field_info, field_type) is_optional = is_field_optional(field_info, field_type, debug)
if debug: if debug:
print(f" TS name: {ts_name}") print(f" TS name: {ts_name}")
print(f" TS type: {ts_type}") print(f" TS type: {ts_type}")
print(f" Optional: {is_optional}") print(f" Optional: {is_optional}")
# Debug the optional logic
origin = get_origin(field_type)
args = get_args(field_type)
is_union_with_none = origin is Union and type(None) in args
has_default = hasattr(field_info, 'default')
has_default_factory = hasattr(field_info, 'default_factory') and field_info.default_factory is not None
print(f" └─ Type is Optional: {is_union_with_none}")
if has_default:
default_val = field_info.default
print(f" └─ Has default: {default_val} (is ...? {default_val is ...})")
else:
print(f" └─ No default attribute")
print(f" └─ Has default factory: {has_default_factory}")
print() print()
properties.append({ properties.append({

View File

@ -0,0 +1,41 @@
import ollama
import defines
_llm = ollama.Client(host=defines.ollama_api_url) # type: ignore
class llm_manager:
    """
    Manage LLM operations through the shared Ollama client.

    Wraps the module-level `_llm` client behind static accessors so callers
    never construct their own connection.
    """

    @staticmethod
    def get_llm() -> ollama.Client:  # type: ignore
        """
        Get the shared Ollama client instance.

        Returns:
            The module-level Ollama client.
        """
        return _llm

    @staticmethod
    def get_models() -> list[str]:
        """
        Get the names of the models available on the Ollama server.

        Returns:
            List of model names.
        """
        # ollama.Client exposes list() (there is no .models() method); the
        # response carries a "models" sequence. Entry key is "model" in
        # current clients, "name" in older ones — accept either.
        response = _llm.list()
        return [m.get("model") or m.get("name") for m in response["models"]]

    @staticmethod
    def get_model_info(model_name: str) -> dict:
        """
        Get detailed information about a specific model.

        Args:
            model_name: The name of the model to retrieve information for.

        Returns:
            A dictionary of model details as reported by the server.
        """
        # ollama.Client exposes show() for per-model details (no .model()).
        return _llm.show(model_name)

View File

@ -1,7 +1,7 @@
from fastapi import FastAPI, HTTPException, Depends, Query, Path, Body, status, APIRouter, Request # type: ignore from fastapi import FastAPI, HTTPException, Depends, Query, Path, Body, status, APIRouter, Request # type: ignore
from fastapi.middleware.cors import CORSMiddleware # type: ignore from fastapi.middleware.cors import CORSMiddleware # type: ignore
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials # type: ignore from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials # type: ignore
from fastapi.responses import JSONResponse # type: ignore from fastapi.responses import JSONResponse, StreamingResponse# type: ignore
from fastapi.staticfiles import StaticFiles # type: ignore from fastapi.staticfiles import StaticFiles # type: ignore
import uvicorn # type: ignore import uvicorn # type: ignore
from typing import List, Optional, Dict, Any from typing import List, Optional, Dict, Any
@ -15,6 +15,7 @@ import re
import asyncio import asyncio
import signal import signal
import json import json
import traceback
# Prometheus # Prometheus
from prometheus_client import Summary # type: ignore from prometheus_client import Summary # type: ignore
@ -24,25 +25,24 @@ from prometheus_client import CollectorRegistry, Counter # type: ignore
# Import Pydantic models # Import Pydantic models
from models import ( from models import (
# User models # User models
Candidate, Employer, BaseUser, Guest, Authentication, AuthResponse, Candidate, Employer, BaseUserWithType, BaseUser, Guest, Authentication, AuthResponse,
# Job models # Job models
Job, JobApplication, ApplicationStatus, Job, JobApplication, ApplicationStatus,
# Chat models # Chat models
ChatSession, ChatMessage, ChatContext, ChatSession, ChatMessage, ChatContext, ChatQuery,
# AI models
AIParameters,
# Supporting models # Supporting models
Location, Skill, WorkExperience, Education Location, Skill, WorkExperience, Education
) )
import defines import defines
import agents
from logger import logger from logger import logger
from database import RedisDatabase, redis_manager, DatabaseManager from database import RedisDatabase, redis_manager, DatabaseManager
from metrics import Metrics from metrics import Metrics
from llm_manager import llm_manager
# Initialize FastAPI app # Initialize FastAPI app
# ============================ # ============================
@ -178,7 +178,7 @@ async def verify_token_with_blacklist(credentials: HTTPAuthorizationCredentials
async def get_current_user( async def get_current_user(
user_id: str = Depends(verify_token_with_blacklist), user_id: str = Depends(verify_token_with_blacklist),
database: RedisDatabase = Depends(lambda: db_manager.get_database()) database: RedisDatabase = Depends(lambda: db_manager.get_database())
): ) -> BaseUserWithType:
"""Get current user from database""" """Get current user from database"""
try: try:
# Check candidates first # Check candidates first
@ -354,16 +354,17 @@ async def login(
@api_router.post("/auth/logout") @api_router.post("/auth/logout")
async def logout( async def logout(
refreshToken: str = Body(..., alias="refreshToken"), access_token: str = Body(..., alias="accessToken"),
accessToken: Optional[str] = Body(None, alias="accessToken"), refresh_token: str = Body(..., alias="refreshToken"),
current_user = Depends(get_current_user), current_user = Depends(get_current_user),
database: RedisDatabase = Depends(get_database) database: RedisDatabase = Depends(get_database)
): ):
"""Logout endpoint - revokes both access and refresh tokens""" """Logout endpoint - revokes both access and refresh tokens"""
logger.info(f"🔑 User {current_user.id} is logging out")
try: try:
# Verify refresh token # Verify refresh token
try: try:
refresh_payload = jwt.decode(refreshToken, SECRET_KEY, algorithms=[ALGORITHM]) refresh_payload = jwt.decode(refresh_token, SECRET_KEY, algorithms=[ALGORITHM])
user_id = refresh_payload.get("sub") user_id = refresh_payload.get("sub")
token_type = refresh_payload.get("type") token_type = refresh_payload.get("type")
refresh_exp = refresh_payload.get("exp") refresh_exp = refresh_payload.get("exp")
@ -394,7 +395,7 @@ async def logout(
refresh_ttl = max(0, refresh_exp - int(datetime.now(UTC).timestamp())) refresh_ttl = max(0, refresh_exp - int(datetime.now(UTC).timestamp()))
if refresh_ttl > 0: if refresh_ttl > 0:
await redis_client.setex( await redis_client.setex(
f"blacklisted_token:{refreshToken}", f"blacklisted_token:{refresh_token}",
refresh_ttl, refresh_ttl,
json.dumps({ json.dumps({
"user_id": user_id, "user_id": user_id,
@ -406,9 +407,9 @@ async def logout(
logger.info(f"🔒 Blacklisted refresh token for user {user_id}") logger.info(f"🔒 Blacklisted refresh token for user {user_id}")
# If access token is provided, revoke it too # If access token is provided, revoke it too
if accessToken: if access_token:
try: try:
access_payload = jwt.decode(accessToken, SECRET_KEY, algorithms=[ALGORITHM]) access_payload = jwt.decode(access_token, SECRET_KEY, algorithms=[ALGORITHM])
access_user_id = access_payload.get("sub") access_user_id = access_payload.get("sub")
access_exp = access_payload.get("exp") access_exp = access_payload.get("exp")
@ -417,7 +418,7 @@ async def logout(
access_ttl = max(0, access_exp - int(datetime.now(UTC).timestamp())) access_ttl = max(0, access_exp - int(datetime.now(UTC).timestamp()))
if access_ttl > 0: if access_ttl > 0:
await redis_client.setex( await redis_client.setex(
f"blacklisted_token:{accessToken}", f"blacklisted_token:{access_token}",
access_ttl, access_ttl,
json.dumps({ json.dumps({
"user_id": user_id, "user_id": user_id,
@ -447,7 +448,7 @@ async def logout(
"message": "Logged out successfully", "message": "Logged out successfully",
"tokensRevoked": { "tokensRevoked": {
"refreshToken": True, "refreshToken": True,
"accessToken": bool(accessToken) "accessToken": bool(access_token)
} }
}) })
@ -905,7 +906,7 @@ async def search_jobs(
@api_router.post("/chat/sessions") @api_router.post("/chat/sessions")
async def create_chat_session( async def create_chat_session(
session_data: Dict[str, Any] = Body(...), session_data: Dict[str, Any] = Body(...),
current_user = Depends(get_current_user), current_user : BaseUserWithType = Depends(get_current_user),
database: RedisDatabase = Depends(get_database) database: RedisDatabase = Depends(get_database)
): ):
"""Create a new chat session""" """Create a new chat session"""
@ -919,6 +920,7 @@ async def create_chat_session(
chat_session = ChatSession.model_validate(session_data) chat_session = ChatSession.model_validate(session_data)
await database.set_chat_session(chat_session.id, chat_session.model_dump()) await database.set_chat_session(chat_session.id, chat_session.model_dump())
logger.info(f"✅ Chat session created: {chat_session.id} for user {current_user.id}")
return create_success_response(chat_session.model_dump(by_alias=True)) return create_success_response(chat_session.model_dump(by_alias=True))
except Exception as e: except Exception as e:
@ -952,7 +954,117 @@ async def get_chat_session(
status_code=500, status_code=500,
content=create_error_response("FETCH_ERROR", str(e)) content=create_error_response("FETCH_ERROR", str(e))
) )
@api_router.get("/chat/sessions/{session_id}/messages")
async def get_chat_session_messages(
    session_id: str = Path(...),
    current_user = Depends(get_current_user),
    page: int = Query(1, ge=1),
    limit: int = Query(20, ge=1, le=100),
    sortBy: Optional[str] = Query(None, alias="sortBy"),
    sortOrder: str = Query("desc", pattern="^(asc|desc)$", alias="sortOrder"),
    filters: Optional[str] = Query(None),
    database: RedisDatabase = Depends(get_database)
):
    """Get the messages of a chat session, filtered, sorted, and paginated."""
    try:
        # 404 before touching messages so callers can distinguish a missing
        # session from a session with no messages.
        chat_session_data = await database.get_chat_session(session_id)
        if not chat_session_data:
            return JSONResponse(
                status_code=404,
                content=create_error_response("NOT_FOUND", "Chat session not found")
            )

        chat_messages = await database.get_chat_messages(session_id)

        # Validate raw records into ChatMessage objects so camelCase aliasing
        # is applied on the way back out.
        messages_list = [ChatMessage.model_validate(msg) for msg in chat_messages]

        # `filters` is a JSON-encoded object; reject malformed input with a
        # 400 instead of letting it surface as a generic 500 below.
        filter_dict = None
        if filters:
            try:
                filter_dict = json.loads(filters)
            except json.JSONDecodeError as e:
                return JSONResponse(
                    status_code=400,
                    content=create_error_response("INVALID_FILTERS", f"filters is not valid JSON: {e}")
                )

        paginated_messages, total = filter_and_paginate(
            messages_list, page, limit, sortBy, sortOrder, filter_dict
        )

        paginated_response = create_paginated_response(
            [m.model_dump(by_alias=True) for m in paginated_messages],
            page, limit, total
        )
        return create_success_response(paginated_response)

    except Exception as e:
        logger.error(f"Get chat session messages error: {e}")
        return JSONResponse(
            status_code=500,
            content=create_error_response("FETCH_ERROR", str(e))
        )
@api_router.post("/chat/sessions/{session_id}/messages/stream")
async def post_chat_session_message_stream(
    request: Request,  # injected by FastAPI via the annotation; must not carry a default
    session_id: str = Path(...),
    data: Dict[str, Any] = Body(...),
    current_user = Depends(get_current_user),
    database: RedisDatabase = Depends(get_database),
):
    """Post a message to a chat session and stream the agent's response.

    The response body is newline-delimited JSON: one serialized ChatMessage
    per line as the agent produces them.
    """
    try:
        chat_session_data = await database.get_chat_session(session_id)
        if not chat_session_data:
            return JSONResponse(
                status_code=404,
                content=create_error_response("NOT_FOUND", "Chat session not found")
            )

        # The session's context type selects which agent handles the query.
        chat_type = chat_session_data.get("context", {}).get("type", "general")
        logger.info(f"🔗 Chat session {session_id} type {chat_type} accessed by user {current_user.id}")

        query = data.get("query")
        if not query:
            return JSONResponse(
                status_code=400,
                content=create_error_response("INVALID_QUERY", "Query cannot be empty")
            )
        chat_query = ChatQuery.model_validate(query)

        chat_agent = agents.get_or_create_agent(agent_type=chat_type, prometheus_collector=prometheus_collector, database=database)
        if not chat_agent:
            return JSONResponse(
                status_code=400,
                content=create_error_response("AGENT_NOT_FOUND", "No agent found for this chat type")
            )

        async def message_stream_generator():
            """Yield each agent message as one JSON line."""
            async for message in chat_agent.generate(
                llm=llm_manager.get_llm(),
                model=defines.model,
                query=chat_query,
                session_id=session_id,
                user_id=current_user.id,
            ):
                json_data = message.model_dump(mode='json', by_alias=True)
                json_str = json.dumps(json_data)
                logger.info(f"🔗 Streaming message for session {session_id}: {json_str}")
                yield json_str + "\n"

        return StreamingResponse(
            message_stream_generator(),
            media_type="application/json",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "X-Accel-Buffering": "no",  # Prevents Nginx buffering if you're using it
            },
        )
    except Exception as e:
        logger.error(traceback.format_exc())
        logger.error(f"Post chat session message stream error: {e}")
        return JSONResponse(
            status_code=500,
            content=create_error_response("STREAM_ERROR", str(e))
        )
@api_router.get("/chat/sessions") @api_router.get("/chat/sessions")
async def get_chat_sessions( async def get_chat_sessions(
page: int = Query(1, ge=1), page: int = Query(1, ge=1),

View File

@ -1,7 +1,7 @@
from typing import List, Dict, Optional, Any, Union, Literal, TypeVar, Generic, Annotated from typing import List, Dict, Optional, Any, Union, Literal, TypeVar, Generic, Annotated
from pydantic import BaseModel, Field, EmailStr, HttpUrl, validator # type: ignore from pydantic import BaseModel, Field, EmailStr, HttpUrl, validator # type: ignore
from pydantic.types import constr, conint # type: ignore from pydantic.types import constr, conint # type: ignore
from datetime import datetime, date from datetime import datetime, date, UTC
from enum import Enum from enum import Enum
import uuid import uuid
@ -68,10 +68,11 @@ class ChatSenderType(str, Enum):
SYSTEM = "system" SYSTEM = "system"
class ChatStatusType(str, Enum): class ChatStatusType(str, Enum):
PARTIAL = "partial" PREPARING = "preparing"
DONE = "done"
STREAMING = "streaming"
THINKING = "thinking" THINKING = "thinking"
PARTIAL = "partial"
STREAMING = "streaming"
DONE = "done"
ERROR = "error" ERROR = "error"
class ChatContextType(str, Enum): class ChatContextType(str, Enum):
@ -519,47 +520,73 @@ class JobApplication(BaseModel):
class Config: class Config:
populate_by_name = True # Allow both field names and aliases populate_by_name = True # Allow both field names and aliases
class AIParameters(BaseModel): class RagEntry(BaseModel):
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
user_id: Optional[str] = Field(None, alias="userId")
name: str name: str
description: Optional[str] = None description: str = ""
model: AIModelType enabled: bool = True
temperature: Optional[Annotated[float, Field(ge=0, le=1)]] = 0.7
max_tokens: Optional[Annotated[int, Field(gt=0)]] = Field(..., alias="maxTokens") class ChromaDBGetResponse(BaseModel):
top_p: Optional[Annotated[float, Field(ge=0, le=1)]] = Field(..., alias="topP") # Chroma fields
frequency_penalty: Optional[Annotated[float, Field(ge=-2, le=2)]] = Field(..., alias="frequencyPenalty") ids: List[str] = []
presence_penalty: Optional[Annotated[float, Field(ge=-2, le=2)]] = Field(..., alias="presencePenalty") embeddings: List[List[float]] = Field(default=[])
system_prompt: Optional[str] = Field(None, alias="systemPrompt") documents: List[str] = []
is_default: Optional[bool] = Field(..., alias="isDefault") metadatas: List[Dict[str, Any]] = []
created_at: Optional[datetime] = Field(..., alias="createdAt") # Additional fields
updated_at: Optional[datetime] = Field(..., alias="updatedAt") name: str = ""
custom_model_config: Optional[Dict[str, Any]] = Field(None, alias="customModelConfig") size: int = 0
class Config: query: str = ""
populate_by_name = True # Allow both field names and aliases query_embedding: Optional[List[float]] = Field(default=None, alias="queryEmbedding")
umap_embedding_2d: Optional[List[float]] = Field(default=None, alias="umapEmbedding2D")
umap_embedding_3d: Optional[List[float]] = Field(default=None, alias="umapEmbedding3D")
class ChatContext(BaseModel): class ChatContext(BaseModel):
type: ChatContextType type: ChatContextType
related_entity_id: Optional[str] = Field(None, alias="relatedEntityId") related_entity_id: Optional[str] = Field(None, alias="relatedEntityId")
related_entity_type: Optional[Literal["job", "candidate", "employer"]] = Field(None, alias="relatedEntityType") related_entity_type: Optional[Literal["job", "candidate", "employer"]] = Field(None, alias="relatedEntityType")
ai_parameters: AIParameters = Field(..., alias="aiParameters")
additional_context: Optional[Dict[str, Any]] = Field(None, alias="additionalContext") additional_context: Optional[Dict[str, Any]] = Field(None, alias="additionalContext")
class Config: class Config:
populate_by_name = True # Allow both field names and aliases populate_by_name = True # Allow both field names and aliases
class ChatOptions(BaseModel):
    """Low-level generation options forwarded to the LLM backend."""
    seed: Optional[int] = 8911  # fixed default seed for reproducible sampling
    num_ctx: Optional[int] = Field(default=None, alias="numCtx")  # Number of context tokens
    temperature: Optional[float] = Field(default=0.7)  # Higher temperature to encourage tool usage

    class Config:
        populate_by_name = True  # Allow both field names and aliases, matching sibling models
class ChatMessageMetaData(BaseModel):
    # Generation settings and post-hoc statistics attached to a ChatMessage.
    model: AIModelType = AIModelType.QWEN2_5  # which backend model produced/should produce the reply
    temperature: float = 0.7
    # NOTE(review): 8092 looks like a typo for the common 8192 context size — confirm intent
    max_tokens: int = Field(default=8092, alias="maxTokens")
    top_p: float = Field(default=1, alias="topP")
    frequency_penalty: Optional[float] = Field(None, alias="frequencyPenalty")
    presence_penalty: Optional[float] = Field(None, alias="presencePenalty")
    stop_sequences: Optional[List[str]] = Field(None, alias="stopSequences")
    tunables: Optional[Tunables] = None
    # RAG retrieval results that informed the reply (empty when RAG unused)
    rag: List[ChromaDBGetResponse] = Field(default_factory=list)
    # Token/timing counters — presumably copied from the backend's generation
    # stats; zero until a response has been generated
    eval_count: int = 0
    eval_duration: int = 0
    prompt_eval_count: int = 0
    prompt_eval_duration: int = 0
    options: Optional[ChatOptions] = None  # backend sampling options actually used
    tools: Optional[Dict[str, Any]] = None  # tool definitions offered to the model, if any
    timers: Optional[Dict[str, float]] = None  # named wall-clock timings, if collected
    class Config:
        populate_by_name = True # Allow both field names and aliases
class ChatMessage(BaseModel): class ChatMessage(BaseModel):
id: str = Field(default_factory=lambda: str(uuid.uuid4())) id: str = Field(default_factory=lambda: str(uuid.uuid4()))
session_id: str = Field(..., alias="sessionId") session_id: str = Field(..., alias="sessionId")
status: ChatStatusType status: ChatStatusType
sender: ChatSenderType sender: ChatSenderType
sender_id: Optional[str] = Field(None, alias="senderId") sender_id: Optional[str] = Field(None, alias="senderId")
content: str prompt: str = ""
content: str = ""
chunk: str = ""
timestamp: datetime timestamp: datetime
attachments: Optional[List[Attachment]] = None #attachments: Optional[List[Attachment]] = None
reactions: Optional[List[MessageReaction]] = None #reactions: Optional[List[MessageReaction]] = None
is_edited: bool = Field(False, alias="isEdited") is_edited: bool = Field(False, alias="isEdited")
edit_history: Optional[List[EditHistory]] = Field(None, alias="editHistory") #edit_history: Optional[List[EditHistory]] = Field(None, alias="editHistory")
metadata: Optional[Dict[str, Any]] = None metadata: ChatMessageMetaData = Field(None)
class Config: class Config:
populate_by_name = True # Allow both field names and aliases populate_by_name = True # Allow both field names and aliases
@ -567,8 +594,8 @@ class ChatSession(BaseModel):
id: str = Field(default_factory=lambda: str(uuid.uuid4())) id: str = Field(default_factory=lambda: str(uuid.uuid4()))
user_id: Optional[str] = Field(None, alias="userId") user_id: Optional[str] = Field(None, alias="userId")
guest_id: Optional[str] = Field(None, alias="guestId") guest_id: Optional[str] = Field(None, alias="guestId")
created_at: datetime = Field(..., alias="createdAt") created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias="createdAt")
last_activity: datetime = Field(..., alias="lastActivity") last_activity: datetime = Field(default_factory=lambda: datetime.now(UTC), alias="lastActivity")
title: Optional[str] = None title: Optional[str] = None
context: ChatContext context: ChatContext
messages: Optional[List[ChatMessage]] = None messages: Optional[List[ChatMessage]] = None
@ -614,7 +641,6 @@ class RAGConfiguration(BaseModel):
retrieval_parameters: RetrievalParameters = Field(..., alias="retrievalParameters") retrieval_parameters: RetrievalParameters = Field(..., alias="retrievalParameters")
created_at: datetime = Field(..., alias="createdAt") created_at: datetime = Field(..., alias="createdAt")
updated_at: datetime = Field(..., alias="updatedAt") updated_at: datetime = Field(..., alias="updatedAt")
is_default: bool = Field(..., alias="isDefault")
version: int version: int
is_active: bool = Field(..., alias="isActive") is_active: bool = Field(..., alias="isActive")
class Config: class Config:
@ -671,7 +697,7 @@ class UserPreference(BaseModel):
# ============================ # ============================
# API Request/Response Models # API Request/Response Models
# ============================ # ============================
class Query(BaseModel): class ChatQuery(BaseModel):
prompt: str prompt: str
tunables: Optional[Tunables] = None tunables: Optional[Tunables] = None
agent_options: Optional[Dict[str, Any]] = Field(None, alias="agentOptions") agent_options: Optional[Dict[str, Any]] = Field(None, alias="agentOptions")

View File

@ -0,0 +1,81 @@
import defines
import re
import subprocess
import math
def get_installed_ram(meminfo=None):
    """
    Report total installed RAM as a whole number of decimal GB (e.g. "16GB").

    Args:
        meminfo: Optional /proc/meminfo-style text. When None (the default,
            preserving the original call signature), the live /proc/meminfo
            is read.

    Returns:
        "<N>GB" on success, otherwise a human-readable error string.
    """
    try:
        if meminfo is None:
            with open("/proc/meminfo", "r") as f:
                meminfo = f.read()
        match = re.search(r"MemTotal:\s+(\d+)", meminfo)
        if match:
            # MemTotal is reported in kB; divide by 1000**2 for decimal GB.
            return f"{math.floor(int(match.group(1)) / 1000**2)}GB"
        # Previously fell through and returned None; report explicitly instead.
        return "Error retrieving RAM: MemTotal not found"
    except Exception as e:
        return f"Error retrieving RAM: {e}"
def get_graphics_cards():
    # Enumerate GPUs by shelling out to the `ze-monitor` utility (Intel
    # Level Zero). Returns a list of {"discrete", "name", "memory"} dicts on
    # success, or an error STRING on failure — callers must handle both types.
    gpus = []
    try:
        # Run the ze-monitor utility
        result = subprocess.run(
            ["ze-monitor"], capture_output=True, text=True, check=True
        )

        # Clean up the output (remove leading/trailing whitespace and newlines)
        output = result.stdout.strip()
        # Assumes each line of the bare `ze-monitor` listing corresponds to one
        # device, addressable as --device 1..N — TODO confirm against ze-monitor docs
        for index in range(len(output.splitlines())):
            result = subprocess.run(
                ["ze-monitor", "--device", f"{index+1}", "--info"],
                capture_output=True,
                text=True,
                check=True,
            )
            gpu_info = result.stdout.strip().splitlines()
            gpu = {
                "discrete": True,  # Assume it's discrete initially
                "name": None,
                "memory": None,
            }
            gpus.append(gpu)
            # Scan the per-device report line by line; first match wins for
            # each field.
            for line in gpu_info:
                match = re.match(r"^Device: [^(]*\((.*)\)", line)
                if match:
                    gpu["name"] = match.group(1)
                    continue
                match = re.match(r"^\s*Memory: (.*)", line)
                if match:
                    gpu["memory"] = match.group(1)
                    continue
                match = re.match(r"^.*Is integrated with host: Yes.*", line)
                if match:
                    gpu["discrete"] = False
                    continue

        return gpus
    except Exception as e:
        # Any failure (ze-monitor missing, non-zero exit, parse error) is
        # reported as a string rather than raised.
        return f"Error retrieving GPU info: {e}"
def get_cpu_info(cpuinfo=None):
    """
    Describe the host CPU as "<model name> with <N> cores".

    Args:
        cpuinfo: Optional /proc/cpuinfo-style text. When None (the default,
            preserving the original call signature), the live /proc/cpuinfo
            is read.

    Returns:
        A description string on success, otherwise a human-readable error
        string.
    """
    try:
        if cpuinfo is None:
            with open("/proc/cpuinfo", "r") as f:
                cpuinfo = f.read()
        model_match = re.search(r"model name\s+:\s+(.+)", cpuinfo)
        # One "processor : N" stanza per logical core.
        cores_match = re.findall(r"processor\s+:\s+\d+", cpuinfo)
        if model_match and cores_match:
            return f"{model_match.group(1)} with {len(cores_match)} cores"
        # Previously fell through and returned None; report explicitly instead.
        return "Error retrieving CPU info: expected fields not found"
    except Exception as e:
        return f"Error retrieving CPU info: {e}"
def system_info():
    """Collect a snapshot of host hardware and the configured model settings.

    Returns a dict keyed by display labels; values come from the probe
    helpers above and from the `defines` module.
    """
    ram = get_installed_ram()
    gpus = get_graphics_cards()
    cpu = get_cpu_info()

    info = {}
    info["System RAM"] = ram
    info["Graphics Card"] = gpus
    info["CPU"] = cpu
    info["LLM Model"] = defines.model
    info["Embedding Model"] = defines.embedding_model
    info["Context length"] = defines.max_context
    return info