Almost working again

James Ketr 2025-05-28 22:50:38 -07:00
parent b5b3a1f5dc
commit 02a278736e
26 changed files with 2082 additions and 466 deletions

View File

@@ -1,15 +1,14 @@
import React, { useEffect, useState, useRef, useCallback } from 'react';
import { Route, Routes, useLocation, useNavigate } from 'react-router-dom';
import { ThemeProvider } from '@mui/material/styles';
import { Box } from '@mui/material';
import { backstoryTheme } from './BackstoryTheme';
import { SeverityType } from 'components/Snack';
import { Query } from 'types/types';
import { ConversationHandle } from 'components/Conversation';
import { UserProvider } from 'hooks/useUser';
import { CandidateRoute } from 'routes/CandidateRoute';
import { BackstoryLayout } from 'components/layout/BackstoryLayout';
import { ChatQuery } from 'types/types';
import './BackstoryApp.css';
import '@fontsource/roboto/300.css';
@@ -25,7 +24,7 @@ const BackstoryApp = () => {
const setSnack = useCallback((message: string, severity?: SeverityType) => {
snackRef.current?.setSnack(message, severity);
}, [snackRef]);
const submitQuery = (query: Query) => {
const submitQuery = (query: ChatQuery) => {
console.log(`handleSubmitChatQuery:`, query, chatRef.current ? ' sending' : 'no handler');
chatRef.current?.submitQuery(query);
navigate('/chat');
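For context on the Query-to-ChatQuery rename above: the regenerated ChatQuery type (see types/types.ts later in this commit) makes prompt required. A minimal usage sketch, with an illustrative prompt string:

    const query: ChatQuery = { prompt: "Tell me about this candidate" };
    submitQuery(query); // forwards to the Conversation ref, then navigates to /chat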

View File

@@ -1,16 +1,16 @@
import Box from '@mui/material/Box';
import Button from '@mui/material/Button';
import { Query } from "../types/types";
import { ChatQuery } from "types/types";
type ChatSubmitQueryInterface = (query: Query) => void;
type ChatSubmitQueryInterface = (query: ChatQuery) => void;
interface ChatQueryInterface {
query: Query,
interface BackstoryQueryInterface {
query: ChatQuery,
submitQuery?: ChatSubmitQueryInterface
}
const ChatQuery = (props : ChatQueryInterface) => {
const BackstoryQuery = (props : BackstoryQueryInterface) => {
const { query, submitQuery } = props;
if (submitQuery === undefined) {
@@ -29,11 +29,11 @@ const ChatQuery = (props : ChatQueryInterface) => {
}
export type {
ChatQueryInterface,
BackstoryQueryInterface,
ChatSubmitQueryInterface,
};
export {
ChatQuery,
BackstoryQuery,
};
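A minimal usage sketch of the renamed component, mirroring how ChatPage consumes it later in this commit (the prompt text is illustrative):

    <BackstoryQuery
      query={{ prompt: "What are this candidate's strengths?" }}
      submitQuery={submitQuery}
    />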

View File

@@ -1,7 +1,7 @@
import React, { ReactElement, JSXElementConstructor } from 'react';
import Box from '@mui/material/Box';
import { SxProps, Theme } from '@mui/material';
import { ChatSubmitQueryInterface } from './ChatQuery';
import { ChatSubmitQueryInterface } from './BackstoryQuery';
import { SetSnackType } from './Snack';
interface BackstoryElementProps {

View File

@@ -14,8 +14,8 @@ import { BackstoryTextField, BackstoryTextFieldRef } from 'components/BackstoryT
import { BackstoryElementProps } from './BackstoryTab';
import { connectionBase } from 'utils/Global';
import { useUser } from "hooks/useUser";
import { ApiClient, StreamingResponse } from 'types/api-client';
import { ChatMessage, ChatContext, ChatSession, AIParameters, Query } from 'types/types';
import { StreamingResponse } from 'types/api-client';
import { ChatMessage, ChatContext, ChatSession, ChatQuery } from 'types/types';
import { PaginatedResponse } from 'types/conversion';
import './Conversation.css';
@@ -29,7 +29,7 @@ const loadingMessage: ChatMessage = { ...defaultMessage, content: "Establishing
type ConversationMode = 'chat' | 'job_description' | 'resume' | 'fact_check' | 'persona';
interface ConversationHandle {
submitQuery: (query: Query) => void;
submitQuery: (query: ChatQuery) => void;
fetchHistory: () => void;
}
@@ -124,22 +124,7 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>((props: C
}
const createChatSession = async () => {
try {
const aiParameters: AIParameters = {
name: '',
model: 'qwen2.5',
temperature: 0.7,
topP: 1,
frequencyPenalty: 0,
presencePenalty: 0,
isDefault: true,
createdAt: new Date(),
updatedAt: new Date()
};
const chatContext: ChatContext = {
type: "general",
aiParameters
};
const chatContext: ChatContext = { type: "general" };
const response: ChatSession = await apiClient.createChatSession(chatContext);
setChatSession(response);
} catch (e) {
@@ -201,14 +186,14 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>((props: C
}, [chatSession]);
const handleEnter = (value: string) => {
const query: Query = {
const query: ChatQuery = {
prompt: value
}
processQuery(query);
};
useImperativeHandle(ref, () => ({
submitQuery: (query: Query) => {
submitQuery: (query: ChatQuery) => {
processQuery(query);
},
fetchHistory: () => { getChatMessages(); }
@@ -253,7 +238,7 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>((props: C
controllerRef.current = null;
};
const processQuery = (query: Query) => {
const processQuery = (query: ChatQuery) => {
if (controllerRef.current || !chatSession || !chatSession.id) {
return;
}
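The imperative handle exposed above lets a parent drive a Conversation without prop drilling. A minimal sketch, assuming a ref created with useRef<ConversationHandle>(null) as in BackstoryApp:

    chatRef.current?.submitQuery({ prompt: "Summarize this job description" });
    chatRef.current?.fetchHistory();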

View File

@@ -2,14 +2,14 @@ import React from 'react';
import { MuiMarkdown } from 'mui-markdown';
import { useTheme } from '@mui/material/styles';
import { Link } from '@mui/material';
import { ChatQuery } from './ChatQuery';
import { BackstoryQuery } from 'components/BackstoryQuery';
import Box from '@mui/material/Box';
import JsonView from '@uiw/react-json-view';
import { vscodeTheme } from '@uiw/react-json-view/vscode';
import { Mermaid } from './Mermaid';
import { Scrollable } from './Scrollable';
import { Mermaid } from 'components/Mermaid';
import { Scrollable } from 'components/Scrollable';
import { jsonrepair } from 'jsonrepair';
import { GenerateImage } from './GenerateImage';
import { GenerateImage } from 'components/GenerateImage';
import './StyledMarkdown.css';
import { BackstoryElementProps } from './BackstoryTab';
@@ -98,13 +98,13 @@ const StyledMarkdown: React.FC<StyledMarkdownProps> = (props: StyledMarkdownProp
}
}
},
ChatQuery: {
BackstoryQuery: {
component: (props: { query: string }) => {
const queryString = props.query.replace(/(\w+):/g, '"$1":');
try {
const query = JSON.parse(queryString);
return <ChatQuery submitQuery={submitQuery} query={query} />
return <BackstoryQuery submitQuery={submitQuery} query={query} />
} catch (e) {
console.log("StyledMarkdown error:", queryString, e);
return props.query;
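The override above lets LLM output embed clickable queries directly in markdown. A sketch of input it should parse, assuming mui-markdown forwards the query attribute as a string (bare keys get quoted by the regex before JSON.parse, so values must already use double quotes):

    <BackstoryQuery query='{prompt: "Tell me more about this candidate"}' />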

View File

@@ -57,7 +57,7 @@ const getBackstoryDynamicRoutes = (props: BackstoryDynamicRoutesProps): ReactNod
routes.push(<Route key={`${index++}`} path="/login" element={<LoginPage />} />);
routes.push(<Route key={`${index++}`} path="*" element={<BetaPage />} />);
} else {
routes.push(<Route key={`${index++}`} path="/login" element={<LoginPage />} />);
routes.push(<Route key={`${index++}`} path="/logout" element={<LogoutPage />} />);
if (user.userType === 'candidate') {

View File

@@ -42,8 +42,20 @@ const UserProvider: React.FC<UserProviderProps> = (props: UserProviderProps) =>
console.log("Guest =>", guest);
}, [guest]);
/* If the user changes to a non-null value, create a new
* apiClient with the access token */
useEffect(() => {
console.log("User => ", user);
if (user === null) {
return;
}
/* This apiClient will persist until the user is changed
* or logged out */
const accessToken = localStorage.getItem('accessToken');
if (!accessToken) {
throw Error("accessToken is not set for user!");
}
setApiClient(new ApiClient(accessToken));
}, [user]);
/* Handle logout if any consumers of UserProvider setUser to NULL */
@@ -55,12 +67,18 @@ const UserProvider: React.FC<UserProviderProps> = (props: UserProviderProps) =>
}
const logout = async () => {
if (!user) {
if (!activeUser) {
return;
}
console.log(`Logging out ${user.email}`);
console.log(`Logging out ${activeUser.email}`);
try {
const results = await apiClient.logout();
const accessToken = localStorage.getItem('accessToken');
const refreshToken = localStorage.getItem('refreshToken');
if (!accessToken || !refreshToken) {
setSnack("Authentication tokens are invalid.", "error");
return;
}
const results = await apiClient.logout(accessToken, refreshToken);
if (results.error) {
console.error(results.error);
setSnack(results.error.message, "error")
@@ -98,9 +116,9 @@ const UserProvider: React.FC<UserProviderProps> = (props: UserProviderProps) =>
};
const checkExistingAuth = () => {
const token = localStorage.getItem('accessToken');
const accessToken = localStorage.getItem('accessToken');
const userData = localStorage.getItem('userData');
if (token && userData) {
if (accessToken && userData) {
try {
const user = JSON.parse(userData);
// Convert dates back to Date objects if they're stored as strings
@@ -113,7 +131,7 @@ const UserProvider: React.FC<UserProviderProps> = (props: UserProviderProps) =>
if (user.lastLogin && typeof user.lastLogin === 'string') {
user.lastLogin = new Date(user.lastLogin);
}
setApiClient(new ApiClient(token));
setApiClient(new ApiClient(accessToken));
setUser(user);
} catch (e) {
localStorage.removeItem('accessToken');
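For reference, the localStorage contract this provider now depends on is established at login (see LoginPage later in this commit); accessToken, refreshToken, and userData are always written together, which is why logout can require both tokens:

    localStorage.setItem('accessToken', authResponse.accessToken);
    localStorage.setItem('refreshToken', authResponse.refreshToken);
    localStorage.setItem('userData', JSON.stringify(authResponse.user));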

View File

@@ -1,5 +1,5 @@
import React, { useState, useEffect } from 'react';
import { useNavigate } from 'react-router-dom';
import { useNavigate, useLocation } from 'react-router-dom';
import {
Box,
Container,
@@ -35,7 +35,11 @@ const BetaPage: React.FC<BetaPageProps> = ({
const theme = useTheme();
const [showSparkle, setShowSparkle] = useState<boolean>(false);
const navigate = useNavigate();
const location = useLocation();
if (!children) {
children = (<Box>Location: {location.pathname}</Box>);
}
console.log("BetaPage", children);
// Enhanced sparkle effect for background elements

View File

@@ -6,7 +6,7 @@ import MuiMarkdown from 'mui-markdown';
import { BackstoryPageProps } from '../components/BackstoryTab';
import { Conversation, ConversationHandle } from '../components/Conversation';
import { ChatQuery } from '../components/ChatQuery';
import { BackstoryQuery } from '../components/BackstoryQuery';
import { CandidateInfo } from 'components/CandidateInfo';
import { useUser } from "../hooks/useUser";
@@ -26,7 +26,7 @@ const ChatPage = forwardRef<ConversationHandle, BackstoryPageProps>((props: Back
setQuestions([
<Box sx={{ display: "flex", flexDirection: isMobile ? "column" : "row" }}>
{candidate.questions?.map(({ question, tunables }, i: number) =>
<ChatQuery key={i} query={{ prompt: question, tunables: tunables }} submitQuery={submitQuery} />
<BackstoryQuery key={i} query={{ prompt: question, tunables: tunables }} submitQuery={submitQuery} />
)}
</Box>,
<Box sx={{ p: 1 }}>

View File

@@ -19,7 +19,7 @@ import { StyledMarkdown } from 'components/StyledMarkdown';
import { Scrollable } from '../components/Scrollable';
import { Pulse } from 'components/Pulse';
import { StreamingResponse } from 'types/api-client';
import { ChatContext, ChatSession, AIParameters, Query } from 'types/types';
import { ChatContext, ChatSession, ChatQuery } from 'types/types';
import { useUser } from 'hooks/useUser';
const emptyUser: Candidate = {
@@ -72,12 +72,7 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
const createChatSession = async () => {
try {
const aiParameters: AIParameters = { model: 'qwen2.5' };
const chatContext: ChatContext = {
type: "generate_persona",
aiParameters
};
const chatContext: ChatContext = { type: "generate_persona" };
const response: ChatSession = await apiClient.createChatSession(chatContext);
setChatSession(response);
setSnack(`Chat session created for generate_persona: ${response.id}`);
@@ -90,7 +85,7 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
createChatSession();
}, [chatSession, setChatSession]);
const generatePersona = useCallback((query: Query) => {
const generatePersona = useCallback((query: ChatQuery) => {
if (!chatSession || !chatSession.id) {
return;
}
@@ -196,7 +191,7 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
if (processing) {
return;
}
const query: Query = {
const query: ChatQuery = {
prompt: value,
}
generatePersona(query);
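As with Conversation, the session is created once with a bare ChatContext and per-call behavior rides on the query itself. A minimal sketch (the prompt text is illustrative):

    generatePersona({ prompt: "Generate a persona for a senior embedded engineer" });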

View File

@@ -0,0 +1,57 @@
.PhoneInput:disabled {
opacity: 0.38;
}
/* .PhoneInput:not(:active):not(:focus):not(:hover) {
} */
.PhoneInput::placeholder {
color: rgba(46, 46, 46, 0.38);
}
.PhoneInput:focus,
.PhoneInput:active {
outline: 2px solid black;
}
.PhoneInput:hover:not(:active):not(:focus) {
outline: 1px solid black;
}
.PhoneInputInput {
font: inherit;
letter-spacing: inherit;
color: currentColor;
padding: 4px 0 5px;
border: 0;
box-sizing: content-box;
background: none;
height: 1.4375em;
margin: 0;
-webkit-tap-highlight-color: transparent;
display: block;
min-width: 0;
width: 100%;
-webkit-animation-name: mui-auto-fill-cancel;
animation-name: mui-auto-fill-cancel;
-webkit-animation-duration: 10ms;
animation-duration: 10ms;
padding: 16.5px 14px;
}
.PhoneInputCountry {
min-width: 64px;
justify-content: center;
}
.PhoneInputCountry:focus,
.PhoneInputCountry:active {
outline: 2px solid black;
}
.PhoneInput {
display: flex;
outline: 1px solid rgba(46, 46, 46, 0.38);
border: none;
}
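These classes style the third-party react-phone-number-input control so it visually matches the surrounding MUI text fields; the component itself is mounted in LoginPage.tsx below:

    <PhoneInput defaultCountry='US' value={registerForm.phone}
      onChange={(v) => setPhone(v as E164Number)} />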

View File

@@ -0,0 +1,453 @@
import React, { useState, useEffect } from 'react';
import {
Box,
Container,
Paper,
TextField,
Button,
Typography,
Grid,
Alert,
CircularProgress,
Tabs,
Tab,
AppBar,
Toolbar,
Card,
CardContent,
Divider,
Avatar
} from '@mui/material';
import { Person, PersonAdd, AccountCircle, ExitToApp } from '@mui/icons-material';
import 'react-phone-number-input/style.css';
import PhoneInput from 'react-phone-number-input';
import { E164Number } from 'libphonenumber-js/core';
import './LoginPage.css';
import { ApiClient } from 'types/api-client';
import { useUser } from 'hooks/useUser';
// Import conversion utilities
import {
formatApiRequest,
parseApiResponse,
handleApiResponse,
extractApiData,
isSuccessResponse,
debugConversion,
type ApiResponse
} from 'types/conversion';
import {
AuthResponse, User, Guest, Candidate
} from 'types/types'
import { useNavigate } from 'react-router-dom';
interface LoginRequest {
login: string;
password: string;
}
interface RegisterRequest {
username: string;
email: string;
firstName: string;
lastName: string;
password: string;
phone?: string;
}
const apiClient = new ApiClient();
const LoginPage: React.FC = () => {
const navigate = useNavigate();
const { user, setUser, guest } = useUser();
const [tabValue, setTabValue] = useState(0);
const [loading, setLoading] = useState(false);
const [error, setError] = useState<string | null>(null);
const [success, setSuccess] = useState<string | null>(null);
const [phone, setPhone] = useState<E164Number | null>(null);
const name = (user?.userType === 'candidate' ? (user as Candidate).username : user?.email) || '';
// Login form state
const [loginForm, setLoginForm] = useState<LoginRequest>({
login: '',
password: ''
});
// Register form state
const [registerForm, setRegisterForm] = useState<RegisterRequest>({
username: '',
email: '',
firstName: '',
lastName: '',
password: '',
phone: ''
});
useEffect(() => {
if (phone !== registerForm.phone && phone) {
console.log({ phone });
setRegisterForm({ ...registerForm, phone });
}
}, [phone, registerForm]);
const handleLogin = async (e: React.FormEvent) => {
e.preventDefault();
setLoading(true);
setError(null);
setSuccess(null);
try {
const authResponse = await apiClient.login(loginForm.login, loginForm.password)
debugConversion(authResponse, 'Login Response');
// Store tokens in localStorage
localStorage.setItem('accessToken', authResponse.accessToken);
localStorage.setItem('refreshToken', authResponse.refreshToken);
localStorage.setItem('userData', JSON.stringify(authResponse.user));
setSuccess('Login successful!');
navigate('/');
setUser(authResponse.user);
// Clear form
setLoginForm({ login: '', password: '' });
} catch (err) {
console.error('Login error:', err);
setError(err instanceof Error ? err.message : 'Login failed');
} finally {
setLoading(false);
}
};
const handleRegister = async (e: React.FormEvent) => {
e.preventDefault();
setLoading(true);
setError(null);
setSuccess(null);
try {
const candidate: Candidate = {
username: registerForm.username,
email: registerForm.email,
firstName: registerForm.firstName,
lastName: registerForm.lastName,
fullName: `${registerForm.firstName} ${registerForm.lastName}`,
phone: registerForm.phone || undefined,
userType: 'candidate',
status: 'active',
createdAt: new Date(),
updatedAt: new Date(),
skills: [],
experience: [],
education: [],
preferredJobTypes: [],
languages: [],
certifications: [],
location: {
city: '',
country: '',
remote: true
}
};
const result = await apiClient.createCandidate(candidate);
debugConversion(result, 'Registration Response');
setSuccess('Registration successful! You can now login.');
// Clear form and switch to login tab
setRegisterForm({
username: '',
email: '',
firstName: '',
lastName: '',
password: '',
phone: ''
});
setTabValue(0);
} catch (err) {
console.error('Registration error:', err);
setError(err instanceof Error ? err.message : 'Registration failed');
} finally {
setLoading(false);
}
};
const handleTabChange = (event: React.SyntheticEvent, newValue: number) => {
setTabValue(newValue);
setError(null);
setSuccess(null);
};
// If user is logged in, show their profile
if (user) {
return (
<Container maxWidth="md" sx={{ mt: 4 }}>
<Card elevation={3}>
<CardContent>
<Box sx={{ display: 'flex', alignItems: 'center', mb: 3 }}>
<Avatar sx={{ mr: 2, bgcolor: 'primary.main' }}>
<AccountCircle />
</Avatar>
<Typography variant="h4" component="h1">
User Profile
</Typography>
</Box>
<Divider sx={{ mb: 3 }} />
<Grid container spacing={3}>
<Grid size={{ xs: 12, md: 6 }}>
<Typography variant="body1" sx={{ mb: 1 }}>
<strong>Username:</strong> {name}
</Typography>
</Grid>
<Grid size={{ xs: 12, md: 6 }}>
<Typography variant="body1" sx={{ mb: 1 }}>
<strong>Email:</strong> {user.email}
</Typography>
</Grid>
<Grid size={{ xs: 12, md: 6 }}>
<Typography variant="body1" sx={{ mb: 1 }}>
<strong>Status:</strong> {user.status}
</Typography>
</Grid>
<Grid size={{ xs: 12, md: 6 }}>
<Typography variant="body1" sx={{ mb: 1 }}>
<strong>Phone:</strong> {user.phone || 'Not provided'}
</Typography>
</Grid>
<Grid size={{ xs: 12, md: 6 }}>
<Typography variant="body1" sx={{ mb: 1 }}>
<strong>Last Login:</strong> {
user.lastLogin
? user.lastLogin.toLocaleString()
: 'N/A'
}
</Typography>
</Grid>
<Grid size={{ xs: 12, md: 6 }}>
<Typography variant="body1" sx={{ mb: 1 }}>
<strong>Member Since:</strong> {user.createdAt.toLocaleDateString()}
</Typography>
</Grid>
</Grid>
</CardContent>
</Card>
</Container>
);
}
const validateInput = (value: string) => {
if (!value) return 'This field is required';
// Username: alphanumeric, 3-20 characters, no @
const usernameRegex = /^[a-zA-Z0-9]{3,20}$/;
// Email: basic email format
const emailRegex = /^[^\s@]+@[^\s@]+\.[^\s@]+$/;
if (usernameRegex.test(value)) return '';
if (emailRegex.test(value)) return '';
return 'Enter a valid username (3-20 alphanumeric characters) or email';
};
const handleLoginChange = (event: React.ChangeEvent<HTMLInputElement>) => {
const { value } = event.target;
setLoginForm({ ...loginForm, login: value });
setError(validateInput(value));
};
return (
<Container maxWidth="sm" sx={{ mt: 4 }}>
<Paper elevation={3} sx={{ p: 4 }}>
<Typography variant="h4" component="h1" gutterBottom align="center" color="primary">
Backstory
</Typography>
{guest && (
<Card sx={{ mb: 3, bgcolor: 'grey.50' }} elevation={1}>
<CardContent>
<Typography variant="h6" gutterBottom color="primary">
Guest Session Active
</Typography>
<Typography variant="body2" color="text.secondary" sx={{ mb: 0.5 }}>
Session ID: {guest.sessionId}
</Typography>
<Typography variant="body2" color="text.secondary">
Created: {guest.createdAt.toLocaleString()}
</Typography>
</CardContent>
</Card>
)}
<Box sx={{ borderBottom: 1, borderColor: 'divider', mb: 3 }}>
<Tabs value={tabValue} onChange={handleTabChange} centered>
<Tab icon={<Person />} label="Login" />
<Tab icon={<PersonAdd />} label="Register" />
</Tabs>
</Box>
{error && (
<Alert severity="error" sx={{ mb: 2 }}>
{error}
</Alert>
)}
{success && (
<Alert severity="success" sx={{ mb: 2 }}>
{success}
</Alert>
)}
{tabValue === 0 && (
<Box component="form" onSubmit={handleLogin}>
<Typography variant="h5" gutterBottom>
Sign In
</Typography>
<TextField
fullWidth
label="Username or Email"
type="text"
value={loginForm.login}
onChange={handleLoginChange}
margin="normal"
required
disabled={loading}
variant="outlined"
placeholder="Enter username or email"
/>
<TextField
fullWidth
label="Password"
type="password"
value={loginForm.password}
onChange={(e) => setLoginForm({ ...loginForm, password: e.target.value })}
margin="normal"
required
disabled={loading}
variant="outlined"
autoComplete='current-password'
/>
<Button
type="submit"
fullWidth
variant="contained"
sx={{ mt: 3, mb: 2 }}
disabled={loading}
startIcon={loading ? <CircularProgress size={20} color="inherit" /> : <Person />}
>
{loading ? 'Signing In...' : 'Sign In'}
</Button>
</Box>
)}
{tabValue === 1 && (
<Box component="form" onSubmit={handleRegister}>
<Typography variant="h5" gutterBottom>
Create Account
</Typography>
<Grid container spacing={2} sx={{ mb: 2 }}>
<Grid size={{ xs: 12, sm: 6 }}>
<TextField
fullWidth
label="First Name"
value={registerForm.firstName}
onChange={(e) => setRegisterForm({ ...registerForm, firstName: e.target.value })}
required
disabled={loading}
variant="outlined"
/>
</Grid>
<Grid size={{ xs: 12, sm: 6 }}>
<TextField
fullWidth
label="Last Name"
value={registerForm.lastName}
onChange={(e) => setRegisterForm({ ...registerForm, lastName: e.target.value })}
required
disabled={loading}
variant="outlined"
/>
</Grid>
</Grid>
<TextField
fullWidth
label="Username"
value={registerForm.username}
onChange={(e) => setRegisterForm({ ...registerForm, username: e.target.value })}
margin="normal"
required
disabled={loading}
variant="outlined"
/>
<TextField
fullWidth
label="Email"
type="email"
value={registerForm.email}
onChange={(e) => setRegisterForm({ ...registerForm, email: e.target.value })}
margin="normal"
required
disabled={loading}
variant="outlined"
/>
<PhoneInput
label="Phone (Optional)"
placeholder="Enter phone number"
defaultCountry='US'
value={registerForm.phone}
disabled={loading}
onChange={(v) => setPhone(v as E164Number)} />
{/* <TextField
fullWidth
label="Phone (Optional)"
type="tel"
value={registerForm.phone}
onChange={(e) => setRegisterForm({ ...registerForm, phone: e.target.value })}
margin="normal"
disabled={loading}
variant="outlined"
/> */}
<TextField
fullWidth
label="Password"
type="password"
value={registerForm.password}
onChange={(e) => setRegisterForm({ ...registerForm, password: e.target.value })}
margin="normal"
required
disabled={loading}
variant="outlined"
/>
<Button
type="submit"
fullWidth
variant="contained"
sx={{ mt: 3, mb: 2 }}
disabled={loading}
startIcon={loading ? <CircularProgress size={20} color="inherit" /> : <PersonAdd />}
>
{loading ? 'Creating Account...' : 'Create Account'}
</Button>
</Box>
)}
</Paper>
</Container>
);
};
export { LoginPage };
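A few worked cases for validateInput above, derived directly from its two regexes:

    validateInput('jketr')             // '' (valid username: 3-20 alphanumerics)
    validateInput('james@example.com') // '' (valid email)
    validateInput('jk')                // error: too short for a username, not an email
    validateInput('bad user@')         // error: fails both patterns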

View File

@@ -6,11 +6,11 @@ import {
} from '@mui/material';
import { SxProps } from '@mui/material';
import { ChatQuery } from '../components/ChatQuery';
import { MessageList, BackstoryMessage } from '../components/Message';
import { Conversation } from '../components/Conversation';
import { BackstoryPageProps } from '../components/BackstoryTab';
import { Query } from "../types/types";
import { BackstoryQuery } from 'components/BackstoryQuery';
import { MessageList, BackstoryMessage } from 'components/Message';
import { Conversation } from 'components/Conversation';
import { BackstoryPageProps } from 'components/BackstoryTab';
import { ChatQuery } from "types/types";
import './ResumeBuilderPage.css';
@@ -43,17 +43,17 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
setActiveTab(newValue);
};
const handleJobQuery = (query: Query) => {
const handleJobQuery = (query: ChatQuery) => {
console.log(`handleJobQuery: ${query.prompt} -- `, jobConversationRef.current ? ' sending' : 'no handler');
jobConversationRef.current?.submitQuery(query);
};
const handleResumeQuery = (query: Query) => {
const handleResumeQuery = (query: ChatQuery) => {
console.log(`handleResumeQuery: ${query.prompt} -- `, resumeConversationRef.current ? ' sending' : 'no handler');
resumeConversationRef.current?.submitQuery(query);
};
const handleFactsQuery = (query: Query) => {
const handleFactsQuery = (query: ChatQuery) => {
console.log(`handleFactsQuery: ${query.prompt} -- `, factsConversationRef.current ? ' sending' : 'no handler');
factsConversationRef.current?.submitQuery(query);
};
@@ -202,8 +202,8 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
// console.log('renderJobDescriptionView');
// const jobDescriptionQuestions = [
// <Box sx={{ display: "flex", flexDirection: "column" }}>
// <ChatQuery query={{ prompt: "What are the key skills necessary for this position?", tunables: { enableTools: false } }} submitQuery={handleJobQuery} />
// <ChatQuery query={{ prompt: "How much should this position pay (accounting for inflation)?", tunables: { enableTools: false } }} submitQuery={handleJobQuery} />
// <BackstoryQuery query={{ prompt: "What are the key skills necessary for this position?", tunables: { enableTools: false } }} submitQuery={handleJobQuery} />
// <BackstoryQuery query={{ prompt: "How much should this position pay (accounting for inflation)?", tunables: { enableTools: false } }} submitQuery={handleJobQuery} />
// </Box>,
// ];
@@ -274,8 +274,8 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
// const renderResumeView = useCallback((sx?: SxProps) => {
// const resumeQuestions = [
// <Box sx={{ display: "flex", flexDirection: "column" }}>
// <ChatQuery query={{ prompt: "Is this resume a good fit for the provided job description?", tunables: { enableTools: false } }} submitQuery={handleResumeQuery} />
// <ChatQuery query={{ prompt: "Provide a more concise resume.", tunables: { enableTools: false } }} submitQuery={handleResumeQuery} />
// <BackstoryQuery query={{ prompt: "Is this resume a good fit for the provided job description?", tunables: { enableTools: false } }} submitQuery={handleResumeQuery} />
// <BackstoryQuery query={{ prompt: "Provide a more concise resume.", tunables: { enableTools: false } }} submitQuery={handleResumeQuery} />
// </Box>,
// ];
@@ -323,7 +323,7 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
// const renderFactCheckView = useCallback((sx?: SxProps) => {
// const factsQuestions = [
// <Box sx={{ display: "flex", flexDirection: "column" }}>
// <ChatQuery query={{ prompt: "Rewrite the resume to address any discrepancies.", tunables: { enableTools: false } }} submitQuery={handleFactsQuery} />
// <BackstoryQuery query={{ prompt: "Rewrite the resume to address any discrepancies.", tunables: { enableTools: false } }} submitQuery={handleFactsQuery} />
// </Box>,
// ];

View File

@@ -36,7 +36,7 @@ const CandidateRoute: React.FC<CandidateRouteProps> = (props: CandidateRouteProp
}
getCandidate(username);
}, [candidate, username, setCandidate, navigate, setSnack]);
}, [candidate, username, setCandidate, navigate, setSnack, apiClient]);
if (candidate === null) {
return (<Box>

View File

@@ -59,7 +59,7 @@ class ApiClient {
private baseUrl: string;
private defaultHeaders: Record<string, string>;
constructor(authToken?: string) {
constructor(accessToken?: string) {
const loc = window.location;
if (!loc.host.match(/.*battle-linux.*/)) {
this.baseUrl = loc.protocol + "//" + loc.host + "/api/1.0";
@@ -68,7 +68,7 @@
}
this.defaultHeaders = {
'Content-Type': 'application/json',
...(authToken && { 'Authorization': `Bearer ${authToken}` })
...(accessToken && { 'Authorization': `Bearer ${accessToken}` })
};
}
@@ -86,10 +86,12 @@
return handleApiResponse<Types.AuthResponse>(response);
}
async logout(): Promise<Types.ApiResponse> {
async logout(accessToken: string, refreshToken: string): Promise<Types.ApiResponse> {
console.log(this.defaultHeaders);
const response = await fetch(`${this.baseUrl}/auth/logout`, {
method: 'POST',
headers: this.defaultHeaders,
body: JSON.stringify(formatApiRequest({ accessToken, refreshToken }))
});
return handleApiResponse<Types.ApiResponse>(response);
@@ -324,11 +326,11 @@
/**
* Send message with standard response (non-streaming)
*/
async sendMessage(sessionId: string, query: Types.Query): Promise<Types.ChatMessage> {
async sendMessage(sessionId: string, query: Types.ChatQuery): Promise<Types.ChatMessage> {
const response = await fetch(`${this.baseUrl}/chat/sessions/${sessionId}/messages`, {
method: 'POST',
headers: this.defaultHeaders,
body: JSON.stringify(formatApiRequest({ query }))
body: JSON.stringify(formatApiRequest({query}))
});
return handleApiResponse<Types.ChatMessage>(response);
@@ -339,7 +341,7 @@
*/
sendMessageStream(
sessionId: string,
query: Types.Query,
query: Types.ChatQuery,
options: StreamingOptions = {}
): StreamingResponse {
const abortController = new AbortController();
@@ -488,7 +490,7 @@
*/
async sendMessageAuto(
sessionId: string,
query: Types.Query,
query: Types.ChatQuery,
options?: StreamingOptions
): Promise<Types.ChatMessage> {
// If streaming options are provided, use streaming
@@ -512,36 +514,6 @@
return handlePaginatedApiResponse<Types.ChatMessage>(response);
}
// ============================
// AI Configuration Methods
// ============================
async createAIParameters(params: Omit<Types.AIParameters, 'id' | 'createdAt' | 'updatedAt'>): Promise<Types.AIParameters> {
const response = await fetch(`${this.baseUrl}/ai/parameters`, {
method: 'POST',
headers: this.defaultHeaders,
body: JSON.stringify(formatApiRequest(params))
});
return handleApiResponse<Types.AIParameters>(response);
}
async getAIParameters(id: string): Promise<Types.AIParameters> {
const response = await fetch(`${this.baseUrl}/ai/parameters/${id}`, {
headers: this.defaultHeaders
});
return handleApiResponse<Types.AIParameters>(response);
}
async getUserAIParameters(userId: string): Promise<Types.AIParameters[]> {
const response = await fetch(`${this.baseUrl}/users/${userId}/ai/parameters`, {
headers: this.defaultHeaders
});
return handleApiResponse<Types.AIParameters[]>(response);
}
// ============================
// Error Handling Helper
// ============================
@@ -589,7 +561,7 @@ export function useStreamingChat(sessionId: string) {
const apiClient = useApiClient();
const streamingRef = useRef<StreamingResponse | null>(null);
const sendMessage = useCallback(async (query: Types.Query) => {
const sendMessage = useCallback(async (query: Types.ChatQuery) => {
setError(null);
setIsStreaming(true);
setCurrentMessage(null);
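Callers now thread tokens and the ChatQuery type through explicitly. A minimal sketch against the signatures shown above (sessionId, accessToken, and refreshToken are placeholders):

    const client = new ApiClient(localStorage.getItem('accessToken') ?? undefined);
    const message = await client.sendMessage(sessionId, { prompt: 'Hello' });
    await client.logout(accessToken, refreshToken);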

View File

@@ -1,6 +1,6 @@
// Generated TypeScript types from Pydantic models
// Source: src/backend/models.py
// Generated on: 2025-05-29T02:05:50.622601
// Generated on: 2025-05-29T05:47:25.809967
// DO NOT EDIT MANUALLY - This file is auto-generated
// ============================
@@ -17,7 +17,7 @@ export type ChatContextType = "job_search" | "candidate_screening" | "interview_
export type ChatSenderType = "user" | "ai" | "system";
export type ChatStatusType = "partial" | "done" | "streaming" | "thinking" | "error";
export type ChatStatusType = "preparing" | "thinking" | "partial" | "streaming" | "done" | "error";
export type ColorBlindMode = "protanopia" | "deuteranopia" | "tritanopia" | "none";
@@ -63,141 +63,123 @@ export type VectorStoreType = "pinecone" | "qdrant" | "faiss" | "milvus" | "weav
// Interfaces
// ============================
export interface AIParameters {
id?: string;
userId?: string;
name?: string;
description?: string;
model?: "qwen2.5" | "flux-schnell";
temperature?: number;
maxTokens?: number;
topP?: number;
frequencyPenalty?: number;
presencePenalty?: number;
systemPrompt?: string;
isDefault?: boolean;
createdAt?: Date;
updatedAt?: Date;
customModelConfig?: Record<string, any>;
}
export interface AccessibilitySettings {
fontSize?: "small" | "medium" | "large";
highContrast?: boolean;
reduceMotion?: boolean;
screenReader?: boolean;
fontSize: "small" | "medium" | "large";
highContrast: boolean;
reduceMotion: boolean;
screenReader: boolean;
colorBlindMode?: "protanopia" | "deuteranopia" | "tritanopia" | "none";
}
export interface Analytics {
id?: string;
entityType?: "job" | "candidate" | "chat" | "system" | "employer";
entityId?: string;
metricType?: string;
value?: number;
timestamp?: Date;
entityType: "job" | "candidate" | "chat" | "system" | "employer";
entityId: string;
metricType: string;
value: number;
timestamp: Date;
dimensions?: Record<string, any>;
segment?: string;
}
export interface ApiResponse {
success?: boolean;
success: boolean;
data?: any;
error?: ErrorDetail;
meta?: Record<string, any>;
}
export interface ApplicationDecision {
status?: "accepted" | "rejected";
status: "accepted" | "rejected";
reason?: string;
date?: Date;
by?: string;
date: Date;
by: string;
}
export interface Attachment {
id?: string;
fileName?: string;
fileType?: string;
fileSize?: number;
fileUrl?: string;
uploadedAt?: Date;
isProcessed?: boolean;
fileName: string;
fileType: string;
fileSize: number;
fileUrl: string;
uploadedAt: Date;
isProcessed: boolean;
processingResult?: any;
thumbnailUrl?: string;
}
export interface AuthResponse {
accessToken?: string;
refreshToken?: string;
user?: any;
expiresAt?: number;
accessToken: string;
refreshToken: string;
user: any;
expiresAt: number;
}
export interface Authentication {
userId?: string;
passwordHash?: string;
salt?: string;
refreshTokens?: Array<RefreshToken>;
userId: string;
passwordHash: string;
salt: string;
refreshTokens: Array<RefreshToken>;
resetPasswordToken?: string;
resetPasswordExpiry?: Date;
lastPasswordChange?: Date;
mfaEnabled?: boolean;
lastPasswordChange: Date;
mfaEnabled: boolean;
mfaMethod?: "app" | "sms" | "email";
mfaSecret?: string;
loginAttempts?: number;
loginAttempts: number;
lockedUntil?: Date;
}
export interface BaseUser {
id?: string;
email?: string;
email: string;
phone?: string;
createdAt?: Date;
updatedAt?: Date;
createdAt: Date;
updatedAt: Date;
lastLogin?: Date;
profileImage?: string;
status?: "active" | "inactive" | "pending" | "banned";
status: "active" | "inactive" | "pending" | "banned";
}
export interface BaseUserWithType {
id?: string;
email?: string;
email: string;
phone?: string;
createdAt?: Date;
updatedAt?: Date;
createdAt: Date;
updatedAt: Date;
lastLogin?: Date;
profileImage?: string;
status?: "active" | "inactive" | "pending" | "banned";
userType?: "candidate" | "employer" | "guest";
status: "active" | "inactive" | "pending" | "banned";
userType: "candidate" | "employer" | "guest";
}
export interface Candidate {
id?: string;
email?: string;
email: string;
phone?: string;
createdAt?: Date;
updatedAt?: Date;
createdAt: Date;
updatedAt: Date;
lastLogin?: Date;
profileImage?: string;
status?: "active" | "inactive" | "pending" | "banned";
status: "active" | "inactive" | "pending" | "banned";
userType?: "candidate";
username?: string;
firstName?: string;
lastName?: string;
fullName?: string;
username: string;
firstName: string;
lastName: string;
fullName: string;
description?: string;
resume?: string;
skills?: Array<Skill>;
experience?: Array<WorkExperience>;
skills: Array<Skill>;
experience: Array<WorkExperience>;
questions?: Array<CandidateQuestion>;
education?: Array<Education>;
preferredJobTypes?: Array<"full-time" | "part-time" | "contract" | "internship" | "freelance">;
education: Array<Education>;
preferredJobTypes: Array<"full-time" | "part-time" | "contract" | "internship" | "freelance">;
desiredSalary?: DesiredSalary;
location?: Location;
location: Location;
availabilityDate?: Date;
summary?: string;
languages?: Array<Language>;
certifications?: Array<Certification>;
languages: Array<Language>;
certifications: Array<Certification>;
jobApplications?: Array<JobApplication>;
hasProfile?: boolean;
age?: number;
@@ -206,24 +188,24 @@ export interface Candidate {
}
export interface CandidateContact {
email?: string;
email: string;
phone?: string;
}
export interface CandidateListResponse {
success?: boolean;
success: boolean;
data?: Array<Candidate>;
error?: ErrorDetail;
meta?: Record<string, any>;
}
export interface CandidateQuestion {
question?: string;
question: string;
tunables?: Tunables;
}
export interface CandidateResponse {
success?: boolean;
success: boolean;
data?: Candidate;
error?: ErrorDetail;
meta?: Record<string, any>;
@@ -231,35 +213,64 @@ export interface CandidateResponse {
export interface Certification {
id?: string;
name?: string;
issuingOrganization?: string;
issueDate?: Date;
name: string;
issuingOrganization: string;
issueDate: Date;
expirationDate?: Date;
credentialId?: string;
credentialUrl?: string;
}
export interface ChatContext {
type?: "job_search" | "candidate_screening" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile";
type: "job_search" | "candidate_screening" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile";
relatedEntityId?: string;
relatedEntityType?: "job" | "candidate" | "employer";
aiParameters?: AIParameters;
additionalContext?: Record<string, any>;
}
export interface ChatMessage {
id?: string;
sessionId?: string;
status?: "partial" | "done" | "streaming" | "thinking" | "error";
sender?: "user" | "ai" | "system";
sessionId: string;
status: "preparing" | "thinking" | "partial" | "streaming" | "done" | "error";
sender: "user" | "ai" | "system";
senderId?: string;
prompt?: string;
content?: string;
timestamp?: Date;
attachments?: Array<Attachment>;
reactions?: Array<MessageReaction>;
chunk?: string;
timestamp: Date;
isEdited?: boolean;
editHistory?: Array<EditHistory>;
metadata?: Record<string, any>;
metadata?: ChatMessageMetaData;
}
export interface ChatMessageMetaData {
model?: "qwen2.5" | "flux-schnell";
temperature?: number;
maxTokens?: number;
topP?: number;
frequencyPenalty?: number;
presencePenalty?: number;
stopSequences?: Array<string>;
tunables?: Tunables;
rag?: Array<ChromaDBGetResponse>;
evalCount?: number;
evalDuration?: number;
promptEvalCount?: number;
promptEvalDuration?: number;
options?: ChatOptions;
tools?: Record<string, any>;
timers?: Record<string, number>;
}
export interface ChatOptions {
seed?: number;
numCtx?: number;
temperature?: number;
}
export interface ChatQuery {
prompt: string;
tunables?: Tunables;
agentOptions?: Record<string, any>;
}
export interface ChatSession {
@@ -269,51 +280,64 @@ export interface ChatSession {
createdAt?: Date;
lastActivity?: Date;
title?: string;
context?: ChatContext;
context: ChatContext;
messages?: Array<ChatMessage>;
isArchived?: boolean;
systemPrompt?: string;
}
export interface ChromaDBGetResponse {
ids?: Array<string>;
embeddings?: Array<Array<number>>;
documents?: Array<string>;
metadatas?: Array<Record<string, any>>;
name?: string;
size?: number;
query?: string;
queryEmbedding?: Array<number>;
umapEmbedding2D?: Array<number>;
umapEmbedding3D?: Array<number>;
}
export interface CustomQuestion {
question?: string;
answer?: string;
question: string;
answer: string;
}
export interface DataSourceConfiguration {
id?: string;
ragConfigId?: string;
name?: string;
sourceType?: "document" | "website" | "api" | "database" | "internal";
connectionDetails?: Record<string, any>;
processingPipeline?: Array<ProcessingStep>;
ragConfigId: string;
name: string;
sourceType: "document" | "website" | "api" | "database" | "internal";
connectionDetails: Record<string, any>;
processingPipeline: Array<ProcessingStep>;
refreshSchedule?: string;
lastRefreshed?: Date;
status?: "active" | "pending" | "error" | "processing";
status: "active" | "pending" | "error" | "processing";
errorDetails?: string;
metadata?: Record<string, any>;
}
export interface DesiredSalary {
amount?: number;
currency?: string;
period?: "hour" | "day" | "month" | "year";
amount: number;
currency: string;
period: "hour" | "day" | "month" | "year";
}
export interface EditHistory {
content?: string;
editedAt?: Date;
editedBy?: string;
content: string;
editedAt: Date;
editedBy: string;
}
export interface Education {
id?: string;
institution?: string;
degree?: string;
fieldOfStudy?: string;
startDate?: Date;
institution: string;
degree: string;
fieldOfStudy: string;
startDate: Date;
endDate?: Date;
isCurrent?: boolean;
isCurrent: boolean;
gpa?: number;
achievements?: Array<string>;
location?: Location;
@@ -321,45 +345,45 @@
export interface Employer {
id?: string;
email?: string;
email: string;
phone?: string;
createdAt?: Date;
updatedAt?: Date;
createdAt: Date;
updatedAt: Date;
lastLogin?: Date;
profileImage?: string;
status?: "active" | "inactive" | "pending" | "banned";
status: "active" | "inactive" | "pending" | "banned";
userType?: "employer";
companyName?: string;
industry?: string;
companyName: string;
industry: string;
description?: string;
companySize?: string;
companyDescription?: string;
companySize: string;
companyDescription: string;
websiteUrl?: string;
jobs?: Array<Job>;
location?: Location;
location: Location;
companyLogo?: string;
socialLinks?: Array<SocialLink>;
poc?: PointOfContact;
}
export interface EmployerResponse {
success?: boolean;
success: boolean;
data?: Employer;
error?: ErrorDetail;
meta?: Record<string, any>;
}
export interface ErrorDetail {
code?: string;
message?: string;
code: string;
message: string;
details?: any;
}
export interface Guest {
id?: string;
sessionId?: string;
createdAt?: Date;
lastActivity?: Date;
sessionId: string;
createdAt: Date;
lastActivity: Date;
convertedToUserId?: string;
ipAddress?: string;
userAgent?: string;
@@ -367,49 +391,49 @@
export interface InterviewFeedback {
id?: string;
interviewId?: string;
reviewerId?: string;
technicalScore?: number;
culturalScore?: number;
overallScore?: number;
strengths?: Array<string>;
weaknesses?: Array<string>;
recommendation?: "strong_hire" | "hire" | "no_hire" | "strong_no_hire";
comments?: string;
createdAt?: Date;
updatedAt?: Date;
isVisible?: boolean;
interviewId: string;
reviewerId: string;
technicalScore: number;
culturalScore: number;
overallScore: number;
strengths: Array<string>;
weaknesses: Array<string>;
recommendation: "strong_hire" | "hire" | "no_hire" | "strong_no_hire";
comments: string;
createdAt: Date;
updatedAt: Date;
isVisible: boolean;
skillAssessments?: Array<SkillAssessment>;
}
export interface InterviewSchedule {
id?: string;
applicationId?: string;
scheduledDate?: Date;
endDate?: Date;
interviewType?: "phone" | "video" | "onsite" | "technical" | "behavioral";
interviewers?: Array<string>;
applicationId: string;
scheduledDate: Date;
endDate: Date;
interviewType: "phone" | "video" | "onsite" | "technical" | "behavioral";
interviewers: Array<string>;
location?: string | Location;
notes?: string;
feedback?: InterviewFeedback;
status?: "scheduled" | "completed" | "cancelled" | "rescheduled";
status: "scheduled" | "completed" | "cancelled" | "rescheduled";
meetingLink?: string;
}
export interface Job {
id?: string;
title?: string;
description?: string;
responsibilities?: Array<string>;
requirements?: Array<string>;
title: string;
description: string;
responsibilities: Array<string>;
requirements: Array<string>;
preferredSkills?: Array<string>;
employerId?: string;
location?: Location;
employerId: string;
location: Location;
salaryRange?: SalaryRange;
employmentType?: "full-time" | "part-time" | "contract" | "internship" | "freelance";
datePosted?: Date;
employmentType: "full-time" | "part-time" | "contract" | "internship" | "freelance";
datePosted: Date;
applicationDeadline?: Date;
isActive?: boolean;
isActive: boolean;
applicants?: Array<JobApplication>;
department?: string;
reportsTo?: string;
@@ -422,12 +446,12 @@
export interface JobApplication {
id?: string;
jobId?: string;
candidateId?: string;
status?: "applied" | "reviewing" | "interview" | "offer" | "rejected" | "accepted" | "withdrawn";
appliedDate?: Date;
updatedDate?: Date;
resumeVersion?: string;
jobId: string;
candidateId: string;
status: "applied" | "reviewing" | "interview" | "offer" | "rejected" | "accepted" | "withdrawn";
appliedDate: Date;
updatedDate: Date;
resumeVersion: string;
coverLetter?: string;
notes?: string;
interviewSchedules?: Array<InterviewSchedule>;
@@ -437,28 +461,28 @@
}
export interface JobListResponse {
success?: boolean;
success: boolean;
data?: Array<Job>;
error?: ErrorDetail;
meta?: Record<string, any>;
}
export interface JobResponse {
success?: boolean;
success: boolean;
data?: Job;
error?: ErrorDetail;
meta?: Record<string, any>;
}
export interface Language {
language?: string;
proficiency?: "basic" | "conversational" | "fluent" | "native";
language: string;
proficiency: "basic" | "conversational" | "fluent" | "native";
}
export interface Location {
city?: string;
city: string;
state?: string;
country?: string;
country: string;
postalCode?: string;
latitude?: number;
longitude?: number;
@@ -468,15 +492,15 @@
}
export interface MessageReaction {
userId?: string;
reaction?: string;
timestamp?: Date;
userId: string;
reaction: string;
timestamp: Date;
}
export interface NotificationPreference {
type?: "email" | "push" | "in_app";
events?: Array<string>;
isEnabled?: boolean;
type: "email" | "push" | "in_app";
events: Array<string>;
isEnabled: boolean;
}
export interface PaginatedRequest {
@@ -488,80 +512,79 @@
}
export interface PaginatedResponse {
data?: Array<any>;
total?: number;
page?: number;
limit?: number;
totalPages?: number;
hasMore?: boolean;
data: Array<any>;
total: number;
page: number;
limit: number;
totalPages: number;
hasMore: boolean;
}
export interface PointOfContact {
name?: string;
position?: string;
email?: string;
name: string;
position: string;
email: string;
phone?: string;
}
export interface ProcessingStep {
id?: string;
type?: "extract" | "transform" | "chunk" | "embed" | "filter" | "summarize";
parameters?: Record<string, any>;
order?: number;
type: "extract" | "transform" | "chunk" | "embed" | "filter" | "summarize";
parameters: Record<string, any>;
order: number;
dependsOn?: Array<string>;
}
export interface Query {
prompt?: string;
tunables?: Tunables;
agentOptions?: Record<string, any>;
}
export interface RAGConfiguration {
id?: string;
userId?: string;
name?: string;
userId: string;
name: string;
description?: string;
dataSourceConfigurations?: Array<DataSourceConfiguration>;
embeddingModel?: string;
vectorStoreType?: "pinecone" | "qdrant" | "faiss" | "milvus" | "weaviate";
retrievalParameters?: RetrievalParameters;
createdAt?: Date;
updatedAt?: Date;
isDefault?: boolean;
version?: number;
isActive?: boolean;
dataSourceConfigurations: Array<DataSourceConfiguration>;
embeddingModel: string;
vectorStoreType: "pinecone" | "qdrant" | "faiss" | "milvus" | "weaviate";
retrievalParameters: RetrievalParameters;
createdAt: Date;
updatedAt: Date;
version: number;
isActive: boolean;
}
export interface RagEntry {
name: string;
description?: string;
enabled?: boolean;
}
export interface RefreshToken {
token?: string;
expiresAt?: Date;
device?: string;
ipAddress?: string;
isRevoked?: boolean;
token: string;
expiresAt: Date;
device: string;
ipAddress: string;
isRevoked: boolean;
revokedReason?: string;
}
export interface RetrievalParameters {
searchType?: "similarity" | "mmr" | "hybrid" | "keyword";
topK?: number;
searchType: "similarity" | "mmr" | "hybrid" | "keyword";
topK: number;
similarityThreshold?: number;
rerankerModel?: string;
useKeywordBoost?: boolean;
useKeywordBoost: boolean;
filterOptions?: Record<string, any>;
contextWindow?: number;
contextWindow: number;
}
export interface SalaryRange {
min?: number;
max?: number;
currency?: string;
period?: "hour" | "day" | "month" | "year";
isVisible?: boolean;
min: number;
max: number;
currency: string;
period: "hour" | "day" | "month" | "year";
isVisible: boolean;
}
export interface SearchQuery {
query?: string;
query: string;
filters?: Record<string, any>;
page?: number;
limit?: number;
@@ -571,21 +594,21 @@
export interface Skill {
id?: string;
name?: string;
category?: string;
level?: "beginner" | "intermediate" | "advanced" | "expert";
name: string;
category: string;
level: "beginner" | "intermediate" | "advanced" | "expert";
yearsOfExperience?: number;
}
export interface SkillAssessment {
skillName?: string;
score?: number;
skillName: string;
score: number;
comments?: string;
}
export interface SocialLink {
platform?: "linkedin" | "twitter" | "github" | "dribbble" | "behance" | "website" | "other";
url?: string;
platform: "linkedin" | "twitter" | "github" | "dribbble" | "behance" | "website" | "other";
url: string;
}
export interface Tunables {
@@ -598,35 +621,35 @@ export interface UserActivity {
id?: string;
userId?: string;
guestId?: string;
activityType?: "login" | "search" | "view_job" | "apply_job" | "message" | "update_profile" | "chat";
timestamp?: Date;
metadata?: Record<string, any>;
activityType: "login" | "search" | "view_job" | "apply_job" | "message" | "update_profile" | "chat";
timestamp: Date;
metadata: Record<string, any>;
ipAddress?: string;
userAgent?: string;
sessionId?: string;
}
export interface UserPreference {
userId?: string;
theme?: "light" | "dark" | "system";
notifications?: Array<NotificationPreference>;
accessibility?: AccessibilitySettings;
userId: string;
theme: "light" | "dark" | "system";
notifications: Array<NotificationPreference>;
accessibility: AccessibilitySettings;
dashboardLayout?: Record<string, any>;
language?: string;
timezone?: string;
emailFrequency?: "immediate" | "daily" | "weekly" | "never";
language: string;
timezone: string;
emailFrequency: "immediate" | "daily" | "weekly" | "never";
}
export interface WorkExperience {
id?: string;
companyName?: string;
position?: string;
startDate?: Date;
companyName: string;
position: string;
startDate: Date;
endDate?: Date;
isCurrent?: boolean;
description?: string;
skills?: Array<string>;
location?: Location;
isCurrent: boolean;
description: string;
skills: Array<string>;
location: Location;
achievements?: Array<string>;
}
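The regeneration tightens many previously optional fields into required ones, so object literals must now be complete. A minimal sketch of the new ChatQuery, whose only strictly required field is prompt:

    const query: ChatQuery = { prompt: 'What is your availability?' }; // tunables and agentOptions stay optional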

View File

@@ -0,0 +1,97 @@
from __future__ import annotations
from pydantic import BaseModel, Field # type: ignore
from typing import (
Literal,
get_args,
List,
AsyncGenerator,
TYPE_CHECKING,
Optional,
ClassVar,
Any,
TypeAlias,
Dict,
Tuple,
)
import importlib
import pathlib
import inspect
from prometheus_client import CollectorRegistry # type: ignore
from database import RedisDatabase
from .base import Agent
from logger import logger
_agents: List[Agent] = []
def get_or_create_agent(agent_type: str, prometheus_collector: CollectorRegistry, database: RedisDatabase, **kwargs) -> Agent:
"""
Return the existing agent of the specified type, or create and register a new one, ensuring only one agent per type exists.
Args:
agent_type: The type of agent to create (e.g., 'web', 'database').
**kwargs: Additional fields required by the specific agent subclass.
Returns:
The existing or newly created agent instance.
Raises:
ValueError: If no agent class matches the given agent_type.
"""
# If an agent with the given agent_type already exists, return it
for agent in _agents:
if agent.agent_type == agent_type:
return agent
# Find the matching subclass
for agent_cls in Agent.__subclasses__():
if agent_cls.model_fields["agent_type"].default == agent_type:
# Create the agent instance with provided kwargs
agent = agent_cls(agent_type=agent_type, prometheus_collector=prometheus_collector, database=database, **kwargs)
# if agent.agent_persist: # If an agent is not set to persist, do not add it to the list
_agents.append(agent)
return agent
raise ValueError(f"No agent class found for agent_type: {agent_type}")
# Type alias for Agent or any subclass
AnyAgent: TypeAlias = Agent # covers Agent and all of its subclasses
# Maps class_name to (module_name, class_name)
class_registry: Dict[str, Tuple[str, str]] = {}
__all__ = ['get_or_create_agent']
package_dir = pathlib.Path(__file__).parent
package_name = __name__
for path in package_dir.glob("*.py"):
if path.name in ("__init__.py", "base.py") or path.name.startswith("_"):
continue
module_name = path.stem
full_module_name = f"{package_name}.{module_name}"
try:
module = importlib.import_module(full_module_name)
# Find all Agent subclasses in the module
for name, obj in inspect.getmembers(module, inspect.isclass):
if (
issubclass(obj, AnyAgent)
and obj is not AnyAgent
and obj is not Agent
and name not in class_registry
):
class_registry[name] = (full_module_name, name)
globals()[name] = obj
logger.info(f"Adding agent: {name}")
__all__.append(name) # type: ignore
except ImportError as e:
logger.error(f"Error importing {full_module_name}: {e}")
raise e
except Exception as e:
logger.error(f"Error processing {full_module_name}: {e}")
raise e
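# Usage sketch (assumes a concrete subclass registering agent_type "chat" was
# discovered by the import loop above; repeated calls return the same instance):
#     agent = get_or_create_agent("chat", prometheus_collector=registry, database=db)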

src/backend/agents/base.py (new file, 605 lines)
View File

@@ -0,0 +1,605 @@
from __future__ import annotations
from pydantic import BaseModel, Field, model_validator # type: ignore
from typing import (
Literal,
get_args,
List,
AsyncGenerator,
TYPE_CHECKING,
Optional,
ClassVar,
Any,
TypeAlias,
Dict,
Tuple,
)
import json
import time
import inspect
from abc import ABC
import asyncio
from datetime import datetime, UTC
from prometheus_client import Counter, Summary, CollectorRegistry # type: ignore
from models import ( ChatQuery, ChatMessage, Tunables, ChatStatusType, ChatMessageMetaData)
from logger import logger
import defines
from .registry import agent_registry
from metrics import Metrics
from database import RedisDatabase # type: ignore
class LLMMessage(BaseModel):
role: str = Field(default="")
content: str = Field(default="")
tool_calls: Optional[List[Dict]] = Field(default=None, exclude=True)
class Agent(BaseModel, ABC):
"""
Base class for all agent types.
This class defines the common attributes and methods for all agent types.
"""
class Config:
arbitrary_types_allowed = True # Allow arbitrary types like RedisDatabase
# Agent management with pydantic
agent_type: Literal["base"] = "base"
_agent_type: ClassVar[str] = agent_type # Add this for registration
agent_persist: bool = True # Whether this agent will persist in the database
database: RedisDatabase = Field(
...,
description="Database connection for this agent, used to store and retrieve data."
)
prometheus_collector: CollectorRegistry = Field(..., description="Prometheus collector for this agent, used to track metrics.", exclude=True)
# Tunables (sets default for new Messages attached to this agent)
tunables: Tunables = Field(default_factory=Tunables)
metrics: Optional[Metrics] = Field(
default=None, description="Metrics collector for this agent, used to track performance and usage."
)
@model_validator(mode="after")
def initialize_metrics(self) -> "Agent":
if self.metrics is None:
self.metrics = Metrics(prometheus_collector=self.prometheus_collector)
return self
# Agent properties
system_prompt: str # Mandatory
context_tokens: int = 0
# context_size is shared across all subclasses
_context_size: ClassVar[int] = int(defines.max_context * 0.5)
conversation: List[ChatMessage] = Field(
default_factory=list,
description="Conversation history for this agent, used to maintain context across messages."
)
@property
def context_size(self) -> int:
return Agent._context_size
@context_size.setter
def context_size(self, value: int):
Agent._context_size = value
def set_optimal_context_size(
self, llm: Any, model: str, prompt: str, ctx_buffer=2048
) -> int:
# Most models average 1.3-1.5 tokens per word
word_count = len(prompt.split())
tokens = int(word_count * 1.4)
# Add buffer for safety
total_ctx = tokens + ctx_buffer
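# e.g. a 1,000-word prompt estimates int(1000 * 1.4) = 1,400 tokens;
# with the default 2,048-token buffer, total_ctx = 3,448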
if total_ctx > self.context_size:
logger.info(
f"Increasing context size from {self.context_size} to {total_ctx}"
)
# Grow the context size if necessary
self.context_size = max(self.context_size, total_ctx)
# Return the (possibly grown) shared context size
return self.context_size
# Class and pydantic model management
def __init_subclass__(cls, **kwargs) -> None:
"""Auto-register subclasses"""
super().__init_subclass__(**kwargs)
# Register this class if it has an agent_type
if hasattr(cls, "agent_type") and cls.agent_type != Agent._agent_type:
agent_registry.register(cls.agent_type, cls)
def model_dump(self, *args, **kwargs) -> Any:
# Ensure context is always excluded, even with exclude_unset=True
kwargs.setdefault("exclude", set())
if isinstance(kwargs["exclude"], set):
kwargs["exclude"].add("context")
elif isinstance(kwargs["exclude"], dict):
kwargs["exclude"]["context"] = True
return super().model_dump(*args, **kwargs)
@classmethod
def valid_agent_types(cls) -> set[str]:
"""Return the set of valid agent_type values."""
return set(get_args(cls.__annotations__["agent_type"]))
# Agent methods
def get_agent_type(self):
return self._agent_type
# async def prepare_message(self, message: ChatMessage) -> AsyncGenerator[ChatMessage, None]:
# """
# Prepare message with context information in message.preamble
# """
# logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
# self.metrics.prepare_count.labels(agent=self.agent_type).inc()
# with self.metrics.prepare_duration.labels(agent=self.agent_type).time():
# if not self.context:
# raise ValueError("Context is not set for this agent.")
# # Generate RAG content if enabled, based on the content
# rag_context = ""
# if message.tunables.enable_rag and message.prompt:
# # Gather RAG results, yielding each result
# # as it becomes available
# for message in self.context.user.generate_rag_results(message):
# logger.info(f"RAG: {message.status} - {message.content}")
# if message.status == "error":
# yield message
# return
# if message.status != "done":
# yield message
# # for rag in message.metadata.rag:
# # for doc in rag.documents:
# # rag_context += f"{doc}\n"
# message.preamble = {}
# if rag_context:
# message.preamble["context"] = f"The following is context information about {self.context.user.full_name}:\n{rag_context}"
# if message.tunables.enable_context and self.context.user_resume:
# message.preamble["resume"] = self.context.user_resume
# message.system_prompt = self.system_prompt
# message.status = ChatStatusType.DONE
# yield message
# return
# async def process_tool_calls(
# self,
# llm: Any,
# model: str,
# message: ChatMessage,
# tool_message: Any, # llama response message
# messages: List[LLMMessage],
# ) -> AsyncGenerator[ChatMessage, None]:
# logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
# self.metrics.tool_count.labels(agent=self.agent_type).inc()
# with self.metrics.tool_duration.labels(agent=self.agent_type).time():
# if not self.context:
# raise ValueError("Context is not set for this agent.")
# if not message.metadata.tools:
# raise ValueError("tools field not initialized")
# tool_metadata = message.metadata.tools
# tool_metadata["tool_calls"] = []
# message.status = "tooling"
# for i, tool_call in enumerate(tool_message.tool_calls):
# arguments = tool_call.function.arguments
# tool = tool_call.function.name
# # Yield status update before processing each tool
# message.content = (
# f"Processing tool {i+1}/{len(tool_message.tool_calls)}: {tool}..."
# )
# yield message
# logger.info(f"LLM - {message.content}")
# # Process the tool based on its type
# match tool:
# case "TickerValue":
# ticker = arguments.get("ticker")
# if not ticker:
# ret = None
# else:
# ret = TickerValue(ticker)
# case "AnalyzeSite":
# url = arguments.get("url")
# question = arguments.get(
# "question", "what is the summary of this content?"
# )
# # Additional status update for long-running operations
# message.content = (
# f"Retrieving and summarizing content from {url}..."
# )
# yield message
# ret = await AnalyzeSite(
# llm=llm, model=model, url=url, question=question
# )
# case "GenerateImage":
# prompt = arguments.get("prompt", None)
# if not prompt:
# logger.info("No prompt supplied to GenerateImage")
# ret = { "error": "No prompt supplied to GenerateImage" }
# # Additional status update for long-running operations
# message.content = (
# f"Generating image for {prompt}..."
# )
# yield message
# ret = await GenerateImage(
# llm=llm, model=model, prompt=prompt
# )
# logger.info("GenerateImage returning", ret)
# case "DateTime":
# tz = arguments.get("timezone")
# ret = DateTime(tz)
# case "WeatherForecast":
# city = arguments.get("city")
# state = arguments.get("state")
# message.content = (
# f"Fetching weather data for {city}, {state}..."
# )
# yield message
# ret = WeatherForecast(city, state)
# case _:
# logger.error(f"Requested tool {tool} does not exist")
# ret = None
# # Build response for this tool
# tool_response = {
# "role": "tool",
# "content": json.dumps(ret),
# "name": tool_call.function.name,
# }
# tool_metadata["tool_calls"].append(tool_response)
# if len(tool_metadata["tool_calls"]) == 0:
# message.status = "done"
# yield message
# return
# message_dict = LLMMessage(
# role=tool_message.get("role", "assistant"),
# content=tool_message.get("content", ""),
# tool_calls=[
# {
# "function": {
# "name": tc["function"]["name"],
# "arguments": tc["function"]["arguments"],
# }
# }
# for tc in tool_message.tool_calls
# ],
# )
# messages.append(message_dict)
# messages.extend(tool_metadata["tool_calls"])
# message.status = "thinking"
# message.content = "Incorporating tool results into response..."
# yield message
# # Decrease creativity when processing tool call requests
# message.content = ""
# start_time = time.perf_counter()
# for response in llm.chat(
# model=model,
# messages=messages,
# options={
# **message.metadata.options,
# },
# stream=True,
# ):
# # logger.info(f"LLM::Tools: {'done' if response.done else 'processing'} - {response.message}")
# message.status = "streaming"
# message.chunk = response.message.content
# message.content += message.chunk
# if not response.done:
# yield message
# if response.done:
# self.collect_metrics(response)
# message.metadata.eval_count += response.eval_count
# message.metadata.eval_duration += response.eval_duration
# message.metadata.prompt_eval_count += response.prompt_eval_count
# message.metadata.prompt_eval_duration += response.prompt_eval_duration
# self.context_tokens = (
# response.prompt_eval_count + response.eval_count
# )
# message.status = "done"
# yield message
# end_time = time.perf_counter()
# message.metadata.timers["llm_with_tools"] = end_time - start_time
# return
def collect_metrics(self, response):
self.metrics.tokens_prompt.labels(agent=self.agent_type).inc(
response.prompt_eval_count
)
self.metrics.tokens_eval.labels(agent=self.agent_type).inc(response.eval_count)
async def generate(
self, llm: Any, model: str, query: ChatQuery, session_id: str, user_id: str, temperature=0.7
) -> AsyncGenerator[ChatMessage, None]:
logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
chat_message = ChatMessage(
session_id=session_id,
prompt=query.prompt,
tunables=query.tunables,
status=ChatStatusType.PREPARING,
sender="user",
content="",
timestamp=datetime.now(UTC)
)
self.metrics.generate_count.labels(agent=self.agent_type).inc()
with self.metrics.generate_duration.labels(agent=self.agent_type).time():
# Create a pruned down message list based purely on the prompt and responses,
# discarding the full preamble generated by prepare_message
messages: List[LLMMessage] = [
LLMMessage(role="system", content=self.system_prompt)
]
messages.extend(
[
item
for m in self.conversation
for item in [
LLMMessage(role="user", content=m.prompt.strip() if m.prompt else ""),
LLMMessage(role="assistant", content=m.response.strip()),
]
]
)
# Only the actual user query is provided with the full context message
messages.append(
LLMMessage(role="user", content=query.prompt.strip())
)
# message.messages = messages
chat_message.metadata = ChatMessageMetaData()
chat_message.metadata.options = {
"seed": 8911,
"num_ctx": self.context_size,
"temperature": temperature, # Higher temperature to encourage tool usage
}
# Create a dict for storing various timing stats
chat_message.metadata.timers = {}
# use_tools = message.tunables.enable_tools and len(self.context.tools) > 0
# message.metadata.tools = {
# "available": llm_tools(self.context.tools),
# "used": False,
# }
# tool_metadata = message.metadata.tools
# if use_tools:
# message.status = "thinking"
# message.content = f"Performing tool analysis step 1/2..."
# yield message
# logger.info("Checking for LLM tool usage")
# start_time = time.perf_counter()
# # Tools are enabled and available, so query the LLM with a short context of messages
# # in case the LLM did something like ask "Do you want me to run the tool?" and the
# # user said "Yes" -- need to keep the context in the thread.
# tool_metadata["messages"] = (
# [{"role": "system", "content": self.system_prompt}] + messages[-6:]
# if len(messages) >= 7
# else messages
# )
# response = llm.chat(
# model=model,
# messages=tool_metadata["messages"],
# tools=tool_metadata["available"],
# options={
# **message.metadata.options,
# },
# stream=False, # No need to stream the probe
# )
# self.collect_metrics(response)
# end_time = time.perf_counter()
# message.metadata.timers["tool_check"] = end_time - start_time
# if not response.message.tool_calls:
# logger.info("LLM indicates tools will not be used")
# # The LLM will not use tools, so disable use_tools so we can stream the full response
# use_tools = False
# else:
# tool_metadata["attempted"] = response.message.tool_calls
# if use_tools:
# logger.info("LLM indicates tools will be used")
# # Tools are enabled and available and the LLM indicated it will use them
# message.content = (
# f"Performing tool analysis step 2/2 (tool use suspected)..."
# )
# yield message
# logger.info(f"Performing LLM call with tools")
# start_time = time.perf_counter()
# response = llm.chat(
# model=model,
# messages=tool_metadata["messages"], # messages,
# tools=tool_metadata["available"],
# options={
# **message.metadata.options,
# },
# stream=False,
# )
# self.collect_metrics(response)
# end_time = time.perf_counter()
# message.metadata.timers["non_streaming"] = end_time - start_time
# if not response:
# message.status = "error"
# message.content = "No response from LLM."
# yield message
# return
# if response.message.tool_calls:
# tool_metadata["used"] = response.message.tool_calls
# # Process all yielded items from the handler
# start_time = time.perf_counter()
# async for message in self.process_tool_calls(
# llm=llm,
# model=model,
# message=message,
# tool_message=response.message,
# messages=messages,
# ):
# if message.status == "error":
# yield message
# return
# yield message
# end_time = time.perf_counter()
# message.metadata.timers["process_tool_calls"] = end_time - start_time
# message.status = "done"
# return
# logger.info("LLM indicated tools will be used, and then they weren't")
# message.content = response.message.content
# message.status = "done"
# yield message
# return
# not use_tools
chat_message.status = ChatStatusType.THINKING
chat_message.content = "Generating response..."
yield chat_message
# Reset the response for streaming
chat_message.content = ""
start_time = time.perf_counter()
for response in llm.chat(
model=model,
messages=messages,
options={
**chat_message.metadata.options,
},
stream=True,
):
if not response:
chat_message.status = ChatStatusType.ERROR
chat_message.content = "No response from LLM."
yield chat_message
return
chat_message.status = ChatStatusType.STREAMING
chat_message.chunk = response.message.content
chat_message.content += chat_message.chunk
if not response.done:
yield chat_message
if response.done:
self.collect_metrics(response)
chat_message.metadata.eval_count += response.eval_count
chat_message.metadata.eval_duration += response.eval_duration
chat_message.metadata.prompt_eval_count += response.prompt_eval_count
chat_message.metadata.prompt_eval_duration += response.prompt_eval_duration
self.context_tokens = (
response.prompt_eval_count + response.eval_count
)
chat_message.status = ChatStatusType.DONE
yield chat_message
end_time = time.perf_counter()
chat_message.metadata.timers["streamed"] = end_time - start_time
chat_message.status = ChatStatusType.DONE
self.conversation.append(chat_message)
return
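# Note: each yielded ChatMessage carries the newest delta in `chunk` and the
# accumulated text so far in `content`, so stream consumers can render either.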
# async def process_message(
# self, llm: Any, model: str, message: Message
# ) -> AsyncGenerator[Message, None]:
# logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
# self.metrics.process_count.labels(agent=self.agent_type).inc()
# with self.metrics.process_duration.labels(agent=self.agent_type).time():
# if not self.context:
# raise ValueError("Context is not set for this agent.")
# logger.info(
# "TODO: Implement delay queing; busy for same agent, otherwise return queue size and estimated wait time"
# )
# spinner: List[str] = ["\\", "|", "/", "-"]
# tick: int = 0
# while self.context.processing:
# message.status = "waiting"
# message.content = (
# f"Busy processing another request. Please wait. {spinner[tick]}"
# )
# tick = (tick + 1) % len(spinner)
# yield message
# await asyncio.sleep(1) # Allow the event loop to process the write
# self.context.processing = True
# message.system_prompt = (
# f"<|system|>\n{self.system_prompt.strip()}\n</|system|>"
# )
# message.context_prompt = ""
# for p in message.preamble.keys():
# message.context_prompt += (
# f"\n<|{p}|>\n{message.preamble[p].strip()}\n</|{p}>\n\n"
# )
# message.context_prompt += f"{message.prompt}"
# # Estimate token length of new messages
# message.content = f"Optimizing context..."
# message.status = "thinking"
# yield message
# message.context_size = self.set_optimal_context_size(
# llm, model, prompt=message.context_prompt
# )
# message.content = f"Processing {'RAG augmented ' if message.metadata.rag else ''}query..."
# message.status = "thinking"
# yield message
# async for message in self.generate_llm_response(
# llm=llm, model=model, message=message
# ):
# # logger.info(f"LLM: {message.status} - {f'...{message.content[-20:]}' if len(message.content) > 20 else message.content}")
# if message.status == "error":
# yield message
# self.context.processing = False
# return
# yield message
# # Done processing, add message to conversation
# message.status = "done"
# self.conversation.add(message)
# self.context.processing = False
# return
# Register the base agent
agent_registry.register(Agent._agent_type, Agent)
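# Minimal sketch of how auto-registration works (hypothetical subclass; any
# Agent subclass declaring an agent_type registers itself via __init_subclass__):
#
#   class EchoAgent(Agent):
#       agent_type: Literal["echo"] = "echo"  # type: ignore
#       _agent_type: ClassVar[str] = agent_type
#       system_prompt: str = "Repeat the user's prompt verbatim."
#
#   assert agent_registry.get_class("echo") is EchoAgent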

View File

@ -0,0 +1,88 @@
from __future__ import annotations
from typing import Literal, AsyncGenerator, ClassVar, Optional, Any
from datetime import datetime
import inspect
from .base import Agent
from logger import logger
from .registry import agent_registry
from models import ( ChatQuery, ChatMessage, Tunables, ChatStatusType)
system_message = f"""
Launched on {datetime.now().isoformat()}.
When answering queries, follow these steps:
- First analyze the query to determine if real-time information from the tools might be helpful
- Even when <|context|> or <|resume|> is provided, consider whether the tools would provide more current or comprehensive information
- Use the provided tools whenever they would enhance your response, regardless of whether context is also available
- When presenting weather forecasts, include relevant emojis immediately before the corresponding text. For example, for a sunny day, say \"☀️ Sunny\", or if the forecast says there will be \"rain showers\", say \"🌧️ Rain showers\". Use this mapping for weather emojis: Sunny: ☀️, Cloudy: ☁️, Rainy: 🌧️, Snowy: ❄️
- When any combination of <|context|>, <|resume|> and tool outputs are relevant, synthesize information from all sources to provide the most complete answer
- Always prioritize the most up-to-date and relevant information, whether it comes from <|context|>, <|resume|> or tools
- If <|context|> and tool outputs contain conflicting information, prefer the tool outputs as they likely represent more current data
- If there is information in the <|context|> or <|resume|> sections to enhance the answer, incorporate it seamlessly and refer to it as 'the latest information' or 'recent data' instead of mentioning '<|context|>' (etc.) or quoting it directly.
- Avoid phrases like 'According to the <|context|>' or similar references to the <|context|> or <|resume|>.
CRITICAL INSTRUCTIONS FOR IMAGE GENERATION:
1. When the user requests to generate an image, inject the following into the response: <GenerateImage prompt="USER-PROMPT"/>. Do this when users request images, drawings, or visual content.
2. MANDATORY: You must respond with EXACTLY this format: <GenerateImage prompt="{{USER-PROMPT}}"/>
3. FORBIDDEN: DO NOT use markdown image syntax ![](url)
4. FORBIDDEN: DO NOT create fake URLs or file paths
5. FORBIDDEN: DO NOT use any other image embedding format
CORRECT EXAMPLE:
User: "Draw a cat"
Your response: "<GenerateImage prompt='Draw a cat'/>"
WRONG EXAMPLES (DO NOT DO THIS):
- ![](https://example.com/...)
- ![Cat image](any_url)
- <img src="...">
The <GenerateImage prompt="{{USER-PROMPT}}"/> format is the ONLY way to display images in this system.
DO NOT make up a URL for an image or provide markdown syntax for embedding an image. Only use <GenerateImage prompt="{{USER-PROMPT}}"/>.
Always use tools, <|resume|>, and <|context|> when possible. Be concise, and never make up information. If you do not know the answer, say so.
"""
class Chat(Agent):
"""
Chat Agent
"""
agent_type: Literal["general"] = "general" # type: ignore
_agent_type: ClassVar[str] = agent_type # Add this for registration
system_prompt: str = system_message
# async def prepare_message(self, message: Message) -> AsyncGenerator[Message, None]:
# logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
# if not self.context:
# raise ValueError("Context is not set for this agent.")
# async for message in super().prepare_message(message):
# if message.status != "done":
# yield message
# if message.preamble:
# excluded = {}
# preamble_types = [
# f"<|{p}|>" for p in message.preamble.keys() if p not in excluded
# ]
# preamble_types_AND = " and ".join(preamble_types)
# preamble_types_OR = " or ".join(preamble_types)
# message.preamble[
# "rules"
# ] = f"""\
# - Answer the question based on the information provided in the {preamble_types_AND} sections by incorporating it seamlessly, referring to it using natural language instead of mentioning {preamble_types_OR} or quoting it directly.
# - If there is no information in these sections, answer based on your knowledge, or use any available tools.
# - Avoid phrases like 'According to the {preamble_types[0]}' or similar references to the {preamble_types_OR}.
# """
# message.preamble["question"] = "Respond to:"
# Register the base agent
agent_registry.register(Chat._agent_type, Chat)

View File

@ -0,0 +1,33 @@
from __future__ import annotations
from typing import List, Dict, Optional, Type
# We'll use a registry pattern rather than hardcoded strings
class AgentRegistry:
"""Registry for agent types and classes"""
_registry: Dict[str, Type] = {}
@classmethod
def register(cls, agent_type: str, agent_class: Type) -> Type:
"""Register an agent class with its type"""
cls._registry[agent_type] = agent_class
return agent_class
@classmethod
def get_class(cls, agent_type: str) -> Optional[Type]:
"""Get the class for a given agent type"""
return cls._registry.get(agent_type)
@classmethod
def get_types(cls) -> List[str]:
"""Get all registered agent types"""
return list(cls._registry.keys())
@classmethod
def get_classes(cls) -> Dict[str, Type]:
"""Get all registered agent classes"""
return cls._registry.copy()
# Create a singleton instance
agent_registry = AgentRegistry()
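# Example (sketch): after agent_registry.register("general", Chat),
# agent_registry.get_class("general") returns Chat and
# agent_registry.get_types() includes "general".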

View File

@ -8,7 +8,7 @@ import sys
from datetime import datetime
from models import (
UserStatus, UserType, SkillLevel, EmploymentType,
Candidate, Employer, Location, Skill, AIParameters, AIModelType
Candidate, Employer, Location, Skill, AIModelType
)
@ -120,40 +120,22 @@ def test_validation_constraints():
"""Test that validation constraints work"""
print("\n🔒 Testing validation constraints...")
# Test AI Parameters with constraints
valid_params = AIParameters(
name="Test Config",
model=AIModelType.QWEN2_5,
temperature=0.7, # Valid: 0-1
maxTokens=2000, # Valid: > 0
topP=0.95, # Valid: 0-1
frequencyPenalty=0.0, # Valid: -2 to 2
presencePenalty=0.0, # Valid: -2 to 2
isDefault=True,
createdAt=datetime.now(),
updatedAt=datetime.now()
)
print(f"✅ Valid AI parameters created")
# Test constraint violation
try:
invalid_params = AIParameters(
name="Invalid Config",
model=AIModelType.QWEN2_5,
temperature=1.5, # Invalid: > 1
maxTokens=2000,
topP=0.95,
frequencyPenalty=0.0,
presencePenalty=0.0,
isDefault=True,
# Create a candidate with invalid email
invalid_candidate = Candidate(
email="invalid-email",
username="test_invalid",
createdAt=datetime.now(),
updatedAt=datetime.now()
updatedAt=datetime.now(),
status=UserStatus.ACTIVE,
firstName="Jane",
lastName="Doe",
fullName="Jane Doe"
)
print("❌ Should have rejected invalid temperature")
print("❌ Validation should have failed but didn't")
return False
except Exception:
print(f"✅ Constraint validation working")
except ValueError as e:
print(f"✅ Validation error caught: {e}")
return True
def test_enum_values():
@ -201,6 +183,7 @@ def main():
print(f"\n❌ Test failed: {type(e).__name__}: {e}")
import traceback
traceback.print_exc()
print(f"\n{traceback.format_exc()}")
return False
if __name__ == "__main__":

View File

@ -195,48 +195,107 @@ def snake_to_camel(snake_str: str) -> str:
components = snake_str.split('_')
return components[0] + ''.join(x.title() for x in components[1:])
def is_field_optional(field_info: Any, field_type: Any) -> bool:
def is_field_optional(field_info: Any, field_type: Any, debug: bool = False) -> bool:
"""Determine if a field should be optional in TypeScript"""
if debug:
print(f" 🔍 Analyzing field optionality:")
# First, check if the type itself is Optional (Union with None)
origin = get_origin(field_type)
args = get_args(field_type)
is_union_with_none = origin is Union and type(None) in args
if debug:
print(f" └─ Type is Optional[T]: {is_union_with_none}")
# If the type is Optional[T], it's always optional regardless of Field settings
if is_union_with_none:
if debug:
print(f" └─ RESULT: Optional (type is Optional[T])")
return True
# For non-Optional types, check Field settings and defaults
# Check for default factory (makes field optional)
if hasattr(field_info, 'default_factory') and field_info.default_factory is not None:
has_default_factory = hasattr(field_info, 'default_factory') and field_info.default_factory is not None
if debug:
print(f" └─ Has default factory: {has_default_factory}")
if has_default_factory:
if debug:
print(f" └─ RESULT: Optional (has default factory)")
return True
# Check the default value
# Check the default value - this is the tricky part
if hasattr(field_info, 'default'):
default_val = field_info.default
if debug:
print(f" └─ Has default attribute: {repr(default_val)} (type: {type(default_val)})")
# Field(...) or Ellipsis means REQUIRED (not optional)
if default_val is ...:
# Check for different types of "no default" markers
# Pydantic uses various markers for "no default"
if default_val is ...: # Ellipsis
if debug:
print(f" └─ RESULT: Required (default is Ellipsis)")
return False
# Any other default value (including None) makes it optional
# This covers: Field(None), Field("some_value"), = "some_value", = None, etc.
else:
return True
# Check for Pydantic's internal "PydanticUndefined" or similar markers
default_str = str(default_val)
default_type_str = str(type(default_val))
# If no default is set at all, check if field is explicitly marked as not required
# This is for edge cases in Pydantic v2
if hasattr(field_info, 'is_required'):
try:
return not field_info.is_required()
except:
pass
elif hasattr(field_info, 'required'):
return not field_info.required
# Common patterns for "undefined" in Pydantic
undefined_patterns = [
'PydanticUndefined',
'Undefined',
'_Unset',
'UNSET',
'NotSet',
'_MISSING'
]
# Default: if type is not Optional and no explicit default, it's required (not optional)
is_undefined_marker = any(pattern in default_str or pattern in default_type_str
for pattern in undefined_patterns)
if debug:
print(f" └─ Checking for undefined markers in: {default_str} | {default_type_str}")
print(f" └─ Is undefined marker: {is_undefined_marker}")
if is_undefined_marker:
if debug:
print(f" └─ RESULT: Required (default is undefined marker)")
return False
# Any other actual default value makes it optional
if debug:
print(f" └─ RESULT: Optional (has actual default value)")
return True
else:
if debug:
print(f" └─ No default attribute found")
# If no default attribute exists, check Pydantic's required flag
if hasattr(field_info, 'is_required'):
try:
is_required = field_info.is_required()
if debug:
print(f" └─ is_required(): {is_required}")
return not is_required
except:
if debug:
print(f" └─ is_required() failed")
pass
# Check the 'required' attribute (Pydantic v1 style)
if hasattr(field_info, 'required'):
is_required = field_info.required
if debug:
print(f" └─ required attribute: {is_required}")
return not is_required
# Default: if type is not Optional and no clear default, it's required (not optional)
if debug:
print(f" └─ RESULT: Required (fallback - no Optional type, no default)")
return False
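# Example (sketch) of the mapping this function drives:
#   foo: Optional[int] = None                     -> foo?: number   (Optional type)
#   bar: int = Field(...)                         -> bar: number    (Ellipsis = required)
#   baz: List[str] = Field(default_factory=list)  -> baz?: string[] (default factory)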
def process_pydantic_model(model_class, debug: bool = False) -> Dict[str, Any]:
@ -271,27 +330,12 @@ def process_pydantic_model(model_class, debug: bool = False) -> Dict[str, Any]:
ts_type = python_type_to_typescript(field_type, debug)
# Check if optional
is_optional = is_field_optional(field_info, field_type)
is_optional = is_field_optional(field_info, field_type, debug)
if debug:
print(f" TS name: {ts_name}")
print(f" TS type: {ts_type}")
print(f" Optional: {is_optional}")
# Debug the optional logic
origin = get_origin(field_type)
args = get_args(field_type)
is_union_with_none = origin is Union and type(None) in args
has_default = hasattr(field_info, 'default')
has_default_factory = hasattr(field_info, 'default_factory') and field_info.default_factory is not None
print(f" └─ Type is Optional: {is_union_with_none}")
if has_default:
default_val = field_info.default
print(f" └─ Has default: {default_val} (is ...? {default_val is ...})")
else:
print(f" └─ No default attribute")
print(f" └─ Has default factory: {has_default_factory}")
print()
properties.append({

View File

@ -0,0 +1,41 @@
import ollama
import defines
_llm = ollama.Client(host=defines.ollama_api_url) # type: ignore
class llm_manager:
"""
A class to manage LLM operations using the Ollama client.
"""
@staticmethod
def get_llm() -> ollama.Client: # type: ignore
"""
Get the Ollama client instance.
Returns:
An instance of the Ollama client.
"""
return _llm
@staticmethod
def get_models() -> list[str]:
"""
Get a list of available models from the Ollama client.
Returns:
List of model names.
"""
# The ollama client exposes list() rather than models(); recent clients
# report each entry's name under "model" ("name" in older releases)
return [m.get("model") or m.get("name") for m in _llm.list()["models"]]
@staticmethod
def get_model_info(model_name: str) -> dict:
"""
Get information about a specific model.
Args:
model_name: The name of the model to retrieve information for.
Returns:
A dictionary containing model information.
"""
# show() is the ollama client call for model details; model() does not exist
return _llm.show(model_name)
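# Example (sketch; assumes a reachable Ollama server at defines.ollama_api_url):
#   llm = llm_manager.get_llm()
#   print(llm_manager.get_models())                  # e.g. ["qwen2.5:7b", ...]
#   print(llm_manager.get_model_info("qwen2.5:7b"))  # hypothetical model name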

View File

@ -1,7 +1,7 @@
from fastapi import FastAPI, HTTPException, Depends, Query, Path, Body, status, APIRouter, Request # type: ignore
from fastapi.middleware.cors import CORSMiddleware # type: ignore
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials # type: ignore
from fastapi.responses import JSONResponse # type: ignore
from fastapi.responses import JSONResponse, StreamingResponse# type: ignore
from fastapi.staticfiles import StaticFiles # type: ignore
import uvicorn # type: ignore
from typing import List, Optional, Dict, Any
@ -15,6 +15,7 @@ import re
import asyncio
import signal
import json
import traceback
# Prometheus
from prometheus_client import Summary # type: ignore
@ -24,25 +25,24 @@ from prometheus_client import CollectorRegistry, Counter # type: ignore
# Import Pydantic models
from models import (
# User models
Candidate, Employer, BaseUser, Guest, Authentication, AuthResponse,
Candidate, Employer, BaseUserWithType, BaseUser, Guest, Authentication, AuthResponse,
# Job models
Job, JobApplication, ApplicationStatus,
# Chat models
ChatSession, ChatMessage, ChatContext,
# AI models
AIParameters,
ChatSession, ChatMessage, ChatContext, ChatQuery,
# Supporting models
Location, Skill, WorkExperience, Education
)
import defines
import agents
from logger import logger
from database import RedisDatabase, redis_manager, DatabaseManager
from metrics import Metrics
from llm_manager import llm_manager
# Initialize FastAPI app
# ============================
@ -178,7 +178,7 @@ async def verify_token_with_blacklist(credentials: HTTPAuthorizationCredentials
async def get_current_user(
user_id: str = Depends(verify_token_with_blacklist),
database: RedisDatabase = Depends(lambda: db_manager.get_database())
):
) -> BaseUserWithType:
"""Get current user from database"""
try:
# Check candidates first
@ -354,16 +354,17 @@ async def login(
@api_router.post("/auth/logout")
async def logout(
refreshToken: str = Body(..., alias="refreshToken"),
accessToken: Optional[str] = Body(None, alias="accessToken"),
access_token: str = Body(..., alias="accessToken"),
refresh_token: str = Body(..., alias="refreshToken"),
current_user = Depends(get_current_user),
database: RedisDatabase = Depends(get_database)
):
"""Logout endpoint - revokes both access and refresh tokens"""
logger.info(f"🔑 User {current_user.id} is logging out")
try:
# Verify refresh token
try:
refresh_payload = jwt.decode(refreshToken, SECRET_KEY, algorithms=[ALGORITHM])
refresh_payload = jwt.decode(refresh_token, SECRET_KEY, algorithms=[ALGORITHM])
user_id = refresh_payload.get("sub")
token_type = refresh_payload.get("type")
refresh_exp = refresh_payload.get("exp")
@ -394,7 +395,7 @@ async def logout(
refresh_ttl = max(0, refresh_exp - int(datetime.now(UTC).timestamp()))
if refresh_ttl > 0:
await redis_client.setex(
f"blacklisted_token:{refreshToken}",
f"blacklisted_token:{refresh_token}",
refresh_ttl,
json.dumps({
"user_id": user_id,
@ -406,9 +407,9 @@ async def logout(
logger.info(f"🔒 Blacklisted refresh token for user {user_id}")
# If access token is provided, revoke it too
if accessToken:
if access_token:
try:
access_payload = jwt.decode(accessToken, SECRET_KEY, algorithms=[ALGORITHM])
access_payload = jwt.decode(access_token, SECRET_KEY, algorithms=[ALGORITHM])
access_user_id = access_payload.get("sub")
access_exp = access_payload.get("exp")
@ -417,7 +418,7 @@ async def logout(
access_ttl = max(0, access_exp - int(datetime.now(UTC).timestamp()))
if access_ttl > 0:
await redis_client.setex(
f"blacklisted_token:{accessToken}",
f"blacklisted_token:{access_token}",
access_ttl,
json.dumps({
"user_id": user_id,
@ -447,7 +448,7 @@ async def logout(
"message": "Logged out successfully",
"tokensRevoked": {
"refreshToken": True,
"accessToken": bool(accessToken)
"accessToken": bool(access_token)
}
})
@ -905,7 +906,7 @@ async def search_jobs(
@api_router.post("/chat/sessions")
async def create_chat_session(
session_data: Dict[str, Any] = Body(...),
current_user = Depends(get_current_user),
current_user : BaseUserWithType = Depends(get_current_user),
database: RedisDatabase = Depends(get_database)
):
"""Create a new chat session"""
@ -919,6 +920,7 @@ async def create_chat_session(
chat_session = ChatSession.model_validate(session_data)
await database.set_chat_session(chat_session.id, chat_session.model_dump())
logger.info(f"✅ Chat session created: {chat_session.id} for user {current_user.id}")
return create_success_response(chat_session.model_dump(by_alias=True))
except Exception as e:
@ -953,6 +955,116 @@ async def get_chat_session(
content=create_error_response("FETCH_ERROR", str(e))
)
@api_router.get("/chat/sessions/{session_id}/messages")
async def get_chat_session_messages(
session_id: str = Path(...),
current_user = Depends(get_current_user),
page: int = Query(1, ge=1),
limit: int = Query(20, ge=1, le=100),
sortBy: Optional[str] = Query(None, alias="sortBy"),
sortOrder: str = Query("desc", pattern="^(asc|desc)$", alias="sortOrder"),
filters: Optional[str] = Query(None),
database: RedisDatabase = Depends(get_database)
):
"""Get a chat session by ID"""
try:
chat_session_data = await database.get_chat_session(session_id)
if not chat_session_data:
return JSONResponse(
status_code=404,
content=create_error_response("NOT_FOUND", "Chat session not found")
)
chat_messages = await database.get_chat_messages(session_id)
# Convert messages to ChatMessage objects
messages_list = [ChatMessage.model_validate(msg) for msg in chat_messages]
# Apply filters and pagination
filter_dict = None
if filters:
filter_dict = json.loads(filters)
paginated_messages, total = filter_and_paginate(
messages_list, page, limit, sortBy, sortOrder, filter_dict
)
paginated_response = create_paginated_response(
[m.model_dump(by_alias=True) for m in paginated_messages],
page, limit, total
)
return create_success_response(paginated_response)
except Exception as e:
logger.error(f"Get chat session error: {e}")
return JSONResponse(
status_code=500,
content=create_error_response("FETCH_ERROR", str(e))
)
@api_router.post("/chat/sessions/{session_id}/messages/stream")
async def post_chat_session_message_stream(
request: Request,  # Declared first so the non-default arg precedes defaulted ones
session_id: str = Path(...),
data: Dict[str, Any] = Body(...),
current_user = Depends(get_current_user),
database: RedisDatabase = Depends(get_database),
):
"""Post a message to a chat session and stream the response"""
try:
chat_session_data = await database.get_chat_session(session_id)
if not chat_session_data:
return JSONResponse(
status_code=404,
content=create_error_response("NOT_FOUND", "Chat session not found")
)
chat_type = chat_session_data.get("context", {}).get("type", "general")
logger.info(f"🔗 Chat session {session_id} type {chat_type} accessed by user {current_user.id}")
query = data.get("query")
if not query:
return JSONResponse(
status_code=400,
content=create_error_response("INVALID_QUERY", "Query cannot be empty")
)
chat_query = ChatQuery.model_validate(query)
chat_agent = agents.get_or_create_agent(agent_type=chat_type, prometheus_collector=prometheus_collector, database=database)
if not chat_agent:
return JSONResponse(
status_code=400,
content=create_error_response("AGENT_NOT_FOUND", "No agent found for this chat type")
)
async def message_stream_generator():
"""Generator to stream messages"""
async for message in chat_agent.generate(
llm=llm_manager.get_llm(),
model=defines.model,
query=chat_query,
session_id=session_id,
user_id=current_user.id,
):
json_data = message.model_dump(mode='json', by_alias=True)
json_str = json.dumps(json_data)
logger.info(f"🔗 Streaming message for session {session_id}: {json_str}")
yield json_str + "\n"
return StreamingResponse(
message_stream_generator(),
media_type="application/json",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no", # Prevents Nginx buffering if you're using it
},
)
except Exception as e:
logger.error(traceback.format_exc())
logger.error(f"Get chat session error: {e}")
return JSONResponse(
status_code=500,
content=create_error_response("FETCH_ERROR", str(e))
)
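# Minimal client-side sketch of consuming the NDJSON stream above; purely
# illustrative and not wired into the app. Assumes the `httpx` package and a
# valid bearer token, neither of which this module provides.
async def example_consume_message_stream(base_url: str, session_id: str, token: str):
    import httpx  # local import so the sketch adds no module-level dependency
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream(
            "POST",
            f"{base_url}/chat/sessions/{session_id}/messages/stream",
            headers={"Authorization": f"Bearer {token}"},
            json={"query": {"prompt": "Hello"}},
        ) as response:
            async for line in response.aiter_lines():
                if line:  # one ChatMessage JSON object per line
                    message = json.loads(line)
                    print(message["status"], message.get("chunk", ""))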
@api_router.get("/chat/sessions")
async def get_chat_sessions(
page: int = Query(1, ge=1),

View File

@ -1,7 +1,7 @@
from typing import List, Dict, Optional, Any, Union, Literal, TypeVar, Generic, Annotated
from pydantic import BaseModel, Field, EmailStr, HttpUrl, validator # type: ignore
from pydantic.types import constr, conint # type: ignore
from datetime import datetime, date
from datetime import datetime, date, UTC
from enum import Enum
import uuid
@ -68,10 +68,11 @@ class ChatSenderType(str, Enum):
SYSTEM = "system"
class ChatStatusType(str, Enum):
PARTIAL = "partial"
DONE = "done"
STREAMING = "streaming"
PREPARING = "preparing"
THINKING = "thinking"
PARTIAL = "partial"
STREAMING = "streaming"
DONE = "done"
ERROR = "error"
class ChatContextType(str, Enum):
@ -519,47 +520,73 @@ class JobApplication(BaseModel):
class Config:
populate_by_name = True # Allow both field names and aliases
class AIParameters(BaseModel):
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
user_id: Optional[str] = Field(None, alias="userId")
class RagEntry(BaseModel):
name: str
description: Optional[str] = None
model: AIModelType
temperature: Optional[Annotated[float, Field(ge=0, le=1)]] = 0.7
max_tokens: Optional[Annotated[int, Field(gt=0)]] = Field(..., alias="maxTokens")
top_p: Optional[Annotated[float, Field(ge=0, le=1)]] = Field(..., alias="topP")
frequency_penalty: Optional[Annotated[float, Field(ge=-2, le=2)]] = Field(..., alias="frequencyPenalty")
presence_penalty: Optional[Annotated[float, Field(ge=-2, le=2)]] = Field(..., alias="presencePenalty")
system_prompt: Optional[str] = Field(None, alias="systemPrompt")
is_default: Optional[bool] = Field(..., alias="isDefault")
created_at: Optional[datetime] = Field(..., alias="createdAt")
updated_at: Optional[datetime] = Field(..., alias="updatedAt")
custom_model_config: Optional[Dict[str, Any]] = Field(None, alias="customModelConfig")
class Config:
populate_by_name = True # Allow both field names and aliases
description: str = ""
enabled: bool = True
class ChromaDBGetResponse(BaseModel):
# Chroma fields
ids: List[str] = []
embeddings: List[List[float]] = Field(default=[])
documents: List[str] = []
metadatas: List[Dict[str, Any]] = []
# Additional fields
name: str = ""
size: int = 0
query: str = ""
query_embedding: Optional[List[float]] = Field(default=None, alias="queryEmbedding")
umap_embedding_2d: Optional[List[float]] = Field(default=None, alias="umapEmbedding2D")
umap_embedding_3d: Optional[List[float]] = Field(default=None, alias="umapEmbedding3D")
class ChatContext(BaseModel):
type: ChatContextType
related_entity_id: Optional[str] = Field(None, alias="relatedEntityId")
related_entity_type: Optional[Literal["job", "candidate", "employer"]] = Field(None, alias="relatedEntityType")
ai_parameters: AIParameters = Field(..., alias="aiParameters")
additional_context: Optional[Dict[str, Any]] = Field(None, alias="additionalContext")
class Config:
populate_by_name = True # Allow both field names and aliases
class ChatOptions(BaseModel):
seed: Optional[int] = 8911
num_ctx: Optional[int] = Field(default=None, alias="numCtx") # Number of context tokens
temperature: Optional[float] = Field(default=0.7) # Higher values yield more varied responses
class Config:
populate_by_name = True # Allow both field names and aliases
class ChatMessageMetaData(BaseModel):
model: AIModelType = AIModelType.QWEN2_5
temperature: float = 0.7
max_tokens: int = Field(default=8092, alias="maxTokens")
top_p: float = Field(default=1, alias="topP")
frequency_penalty: Optional[float] = Field(None, alias="frequencyPenalty")
presence_penalty: Optional[float] = Field(None, alias="presencePenalty")
stop_sequences: Optional[List[str]] = Field(None, alias="stopSequences")
tunables: Optional[Tunables] = None
rag: List[ChromaDBGetResponse] = Field(default_factory=list)
eval_count: int = 0
eval_duration: int = 0
prompt_eval_count: int = 0
prompt_eval_duration: int = 0
options: Optional[ChatOptions] = None
tools: Optional[Dict[str, Any]] = None
timers: Optional[Dict[str, float]] = None
class Config:
populate_by_name = True # Allow both field names and aliases
class ChatMessage(BaseModel):
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
session_id: str = Field(..., alias="sessionId")
status: ChatStatusType
sender: ChatSenderType
sender_id: Optional[str] = Field(None, alias="senderId")
content: str
prompt: str = ""
content: str = ""
chunk: str = ""
timestamp: datetime
attachments: Optional[List[Attachment]] = None
reactions: Optional[List[MessageReaction]] = None
#attachments: Optional[List[Attachment]] = None
#reactions: Optional[List[MessageReaction]] = None
is_edited: bool = Field(False, alias="isEdited")
edit_history: Optional[List[EditHistory]] = Field(None, alias="editHistory")
metadata: Optional[Dict[str, Any]] = None
#edit_history: Optional[List[EditHistory]] = Field(None, alias="editHistory")
metadata: ChatMessageMetaData = Field(default_factory=ChatMessageMetaData)
class Config:
populate_by_name = True # Allow both field names and aliases
@ -567,8 +594,8 @@ class ChatSession(BaseModel):
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
user_id: Optional[str] = Field(None, alias="userId")
guest_id: Optional[str] = Field(None, alias="guestId")
created_at: datetime = Field(..., alias="createdAt")
last_activity: datetime = Field(..., alias="lastActivity")
created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias="createdAt")
last_activity: datetime = Field(default_factory=lambda: datetime.now(UTC), alias="lastActivity")
title: Optional[str] = None
context: ChatContext
messages: Optional[List[ChatMessage]] = None
@ -614,7 +641,6 @@ class RAGConfiguration(BaseModel):
retrieval_parameters: RetrievalParameters = Field(..., alias="retrievalParameters")
created_at: datetime = Field(..., alias="createdAt")
updated_at: datetime = Field(..., alias="updatedAt")
is_default: bool = Field(..., alias="isDefault")
version: int
is_active: bool = Field(..., alias="isActive")
class Config:
@ -671,7 +697,7 @@ class UserPreference(BaseModel):
# ============================
# API Request/Response Models
# ============================
class Query(BaseModel):
class ChatQuery(BaseModel):
prompt: str
tunables: Optional[Tunables] = None
agent_options: Optional[Dict[str, Any]] = Field(None, alias="agentOptions")
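# Example (sketch): the camelCase alias round-trips through validate/dump:
#   q = ChatQuery.model_validate({"prompt": "Hi", "agentOptions": {"temperature": 0.2}})
#   assert q.agent_options == {"temperature": 0.2}
#   assert "agentOptions" in q.model_dump(by_alias=True)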

View File

@ -0,0 +1,81 @@
import defines
import re
import subprocess
import math
def get_installed_ram():
try:
with open("/proc/meminfo", "r") as f:
meminfo = f.read()
match = re.search(r"MemTotal:\s+(\d+)", meminfo)
if match:
return f"{math.floor(int(match.group(1)) / 1000**2)}GB" # Convert kB to GB
return "Unknown"
except Exception as e:
return f"Error retrieving RAM: {e}"
def get_graphics_cards():
gpus = []
try:
# Run the ze-monitor utility
result = subprocess.run(
["ze-monitor"], capture_output=True, text=True, check=True
)
# Clean up the output (remove leading/trailing whitespace and newlines)
output = result.stdout.strip()
for index in range(len(output.splitlines())):
result = subprocess.run(
["ze-monitor", "--device", f"{index+1}", "--info"],
capture_output=True,
text=True,
check=True,
)
gpu_info = result.stdout.strip().splitlines()
gpu = {
"discrete": True, # Assume it's discrete initially
"name": None,
"memory": None,
}
gpus.append(gpu)
for line in gpu_info:
match = re.match(r"^Device: [^(]*\((.*)\)", line)
if match:
gpu["name"] = match.group(1)
continue
match = re.match(r"^\s*Memory: (.*)", line)
if match:
gpu["memory"] = match.group(1)
continue
match = re.match(r"^.*Is integrated with host: Yes.*", line)
if match:
gpu["discrete"] = False
continue
return gpus
except Exception as e:
return f"Error retrieving GPU info: {e}"
def get_cpu_info():
try:
with open("/proc/cpuinfo", "r") as f:
cpuinfo = f.read()
model_match = re.search(r"model name\s+:\s+(.+)", cpuinfo)
cores_match = re.findall(r"processor\s+:\s+\d+", cpuinfo)
if model_match and cores_match:
return f"{model_match.group(1)} with {len(cores_match)} cores"
return "Unknown"
except Exception as e:
return f"Error retrieving CPU info: {e}"
def system_info():
return {
"System RAM": get_installed_ram(),
"Graphics Card": get_graphics_cards(),
"CPU": get_cpu_info(),
"LLM Model": defines.model,
"Embedding Model": defines.embedding_model,
"Context length": defines.max_context,
}
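# Example (sketch): dump the collected hardware/model summary as JSON:
#   import json
#   print(json.dumps(system_info(), indent=2))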