Almost full persona generation

James Ketr 2025-05-21 17:24:44 -07:00
parent d1e178aa61
commit c7bdc10cb6
25 changed files with 2575 additions and 268 deletions

View File

@@ -88,7 +88,6 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/{apt,dpkg,cache,log}
# Prerequisite for ze-monitor
RUN apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install -y \
@@ -110,19 +109,22 @@ RUN { \
echo '#!/bin/bash' ; \
echo 'if [[ -e /opt/intel/oneapi/setvars.sh ]]; then source /opt/intel/oneapi/setvars.sh; fi' ; \
echo 'source /opt/backstory/venv/bin/activate' ; \
echo 'if [[ "${1}" != "" ]]; then bash -c "${@}"; else bash; fi' ; \
echo 'if [[ "${1}" != "" ]]; then bash -c "${@}"; else bash -i; fi' ; \
} > /opt/backstory/shell ; \
chmod +x /opt/backstory/shell
# Activate the pip environment on all shell calls
SHELL [ "/opt/backstory/shell" ]
RUN pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/xpu
# https://pytorch-extension.intel.com/installation?platform=gpu&version=v2.7.10%2Bxpu&os=linux%2Fwsl2&package=pip
RUN pip install torch==2.7.0 torchvision==0.22.0 torchaudio==2.7.0 --index-url https://download.pytorch.org/whl/xpu
RUN pip install intel-extension-for-pytorch==2.7.10+xpu oneccl_bind_pt==2.7.0+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
# From https://huggingface.co/docs/bitsandbytes/main/en/installation?backend=Intel+CPU+%2B+GPU#multi-backend
# To use bitsandbytes non-CUDA backends, be sure to install:
RUN pip install "transformers>=4.45.1"
# Note: if you don't want to reinstall BNB's dependencies, append the `--no-deps` flag!
#RUN pip install --force-reinstall 'https://github.com/bitsandbytes-foundation/bitsandbytes/releases/download/continuous-release_multi-backend-refactor/bitsandbytes-0.44.1.dev0-py3-none-manylinux_2_24_x86_64.whl'
RUN pip install --force-reinstall --no-deps "https://github.com/bitsandbytes-foundation/bitsandbytes/releases/download/continuous-release_multi-backend-refactor/bitsandbytes-1.0.0-py3-none-manylinux_2_24_x86_64.whl"
# Install ollama python module
RUN pip install ollama langchain-ollama
@@ -154,6 +156,13 @@ RUN pip3 install 'bigdl-core-xe-all>=2.6.0b'
# Required for IPEX optimize(), which is required to convert from Params4bit
RUN pip install einops diffusers
# For image generation...
RUN pip install einops diffusers
RUN pip install sentencepiece # Needed for FLUX
RUN pip install timm
# Install xformers from source
RUN pip install --no-binary xformers xformers
# Needed by src/utils/rag.py
RUN pip install watchdog
@@ -187,9 +196,9 @@ RUN { \
echo ' shift' ; \
echo ' echo "Running: ${@}"' ; \
echo ' if [[ "${1}" != "" ]]; then' ; \
echo ' exec ${@}'; \
echo ' bash -c "${@}"'; \
echo ' else' ; \
echo ' exec /bin/bash'; \
echo ' exec /bin/bash -i'; \
echo ' fi' ; \
echo 'else'; \
echo ' if [[ ! -e src/cert.pem ]]; then' ; \
@@ -278,7 +287,7 @@ RUN python3 -m venv --system-site-packages /opt/ollama/venv
RUN { \
echo '#!/bin/bash' ; \
echo 'source /opt/ollama/venv/bin/activate' ; \
echo 'if [[ "${1}" != "" ]]; then bash -c ${*}; else bash; fi' ; \
echo 'if [[ "${1}" != "" ]]; then bash -c "${@}"; else bash -i; fi' ; \
} > /opt/ollama/shell ; \
chmod +x /opt/ollama/shell
@@ -369,8 +378,6 @@ RUN pip install \
#COPY /src/requirements.txt /opt/backstory/src/requirements.txt
#RUN pip install -r /opt/backstory/src/requirements.txt
RUN pip install timm xformers
SHELL [ "/bin/bash", "-c" ]
RUN { \
@@ -387,7 +394,7 @@ RUN { \
echo 'fi' ; \
echo 'if [[ -e /opt/intel/oneapi/setvars.sh ]]; then source /opt/intel/oneapi/setvars.sh; fi' ; \
echo 'source /opt/backstory/venv/bin/activate' ; \
echo 'if [[ "${1}" == "shell" ]]; then echo "Dropping to shell"; /bin/bash; exit $?; fi' ; \
echo 'if [[ "${1}" == "shell" ]]; then echo "Dropping to shell"; /bin/bash -i; exit $?; fi' ; \
echo 'while true; do' ; \
echo ' echo "Launching jupyter lab"' ; \
echo ' jupyter lab \' ; \
@@ -406,9 +413,13 @@ RUN { \
echo 'done' ; \
} > /entrypoint-jupyter.sh \
&& chmod +x /entrypoint-jupyter.sh
# echo ' --no-browser \' ; \
WORKDIR /opt/jupyter
ENV PATH=/opt/backstory:$PATH
ENTRYPOINT [ "/entrypoint-jupyter.sh" ]
FROM python AS miniircd
@@ -432,7 +443,7 @@ RUN python3 -m venv --system-site-packages /opt/miniircd/venv
RUN { \
echo '#!/bin/bash' ; \
echo 'source /opt/miniircd/venv/bin/activate' ; \
echo 'if [[ "${1}" != "" ]]; then bash -c "${@}"; else bash; fi' ; \
echo 'if [[ "${1}" != "" ]]; then bash -c "${@}"; else bash -i; fi' ; \
} > /opt/miniircd/shell ; \
chmod +x /opt/miniircd/shell
@@ -450,14 +461,14 @@ RUN { \
echo 'echo "Setting pip environment to /opt/miniircd"'; \
echo 'source /opt/miniircd/venv/bin/activate'; \
echo ''; \
echo 'if [[ "${1}" == "/bin/bash" ]] || [[ "${1}" =~ ^(/opt/miniircd/)?shell$ ]]; then'; \
echo 'if [[ "${1}" == "/bin/bash -i" ]] || [[ "${1}" =~ ^(/opt/miniircd/)?shell$ ]]; then'; \
echo ' echo "Dropping to shell"'; \
echo ' shift' ; \
echo ' echo "Running: ${@}"' ; \
echo ' if [[ "${1}" != "" ]]; then' ; \
echo ' exec ${@}'; \
echo ' bash -c "${@}"'; \
echo ' else' ; \
echo ' exec /bin/bash'; \
echo ' exec /bin/bash -i'; \
echo ' fi' ; \
echo 'else'; \
echo ' echo "Launching IRC server..."'; \
@@ -495,9 +506,9 @@ RUN { \
echo ' shift' ; \
echo ' echo "Running: ${@}"' ; \
echo ' if [[ "${1}" != "" ]]; then' ; \
echo ' exec ${@}'; \
echo ' bash -c "${@}"'; \
echo ' else' ; \
echo ' exec /bin/bash'; \
echo ' exec /bin/bash -i'; \
echo ' fi' ; \
echo 'fi' ; \
echo 'cd /opt/backstory/frontend'; \
@@ -525,7 +536,7 @@ WORKDIR /opt/backstory/frontend
RUN { \
echo '#!/bin/bash' ; \
echo 'if [[ "${1}" != "" ]]; then bash -c "${@}"; else bash; fi' ; \
echo 'if [[ "${1}" != "" ]]; then bash -c "${@}"; else bash -i; fi' ; \
} > /opt/backstory/shell ; \
chmod +x /opt/backstory/shell

View File

@@ -54,6 +54,7 @@ type BackstoryMessage = {
prompt?: string;
preamble?: {};
status?: string;
remaining_time?: number;
full_content?: string;
response?: string; // Set when status === 'done', 'partial', or 'error'
chunk?: string; // Used when status === 'streaming'
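
A minimal sketch (not part of this commit) of how a consumer branches on these fields; the handleUpdate and onText names are illustrative:

// Sketch only: 'chunk' carries incremental text while status === 'streaming',
// 'response' holds the full text once status is 'done', 'partial', or 'error',
// and 'remaining_time' is the server's countdown estimate in seconds.
const handleUpdate = (update: BackstoryMessage, onText: (text: string) => void) => {
    switch (update.status) {
        case 'streaming':
            onText(update.chunk ?? '');
            break;
        case 'done':
        case 'partial':
        case 'error':
            onText(update.response ?? '');
            break;
        default: // intermediate status updates
            console.log(`status=${update.status}, ~${update.remaining_time ?? 0}s remaining`);
            break;
    }
};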

View File

@@ -16,7 +16,6 @@ import { BackstoryElementProps } from './BackstoryTab';
interface StyledMarkdownProps extends BackstoryElementProps {
className?: string,
content: string,
sx?: SxProps,
streaming?: boolean,
};

View File

@@ -44,6 +44,7 @@ const DefaultNavItems: NavigationLinkType[] = [
const CandidateNavItems : NavigationLinkType[]= [
{ name: 'Chat', path: '/chat', icon: <ChatIcon /> },
{ name: 'Job Analysis', path: '/job-analysis', icon: <WorkIcon /> },
{ name: 'Resume Builder', path: '/resume-builder', icon: <WorkIcon /> },
{ name: 'Knowledge Explorer', path: '/knowledge-explorer', icon: <WorkIcon /> },
{ name: 'Find a Candidate', path: '/find-a-candidate', icon: <InfoIcon /> },
@@ -58,6 +59,7 @@ const CandidateNavItems : NavigationLinkType[]= [
const EmployerNavItems: NavigationLinkType[] = [
{ name: 'Chat', path: '/chat', icon: <ChatIcon /> },
{ name: 'Job Analysis', path: '/job-analysis', icon: <WorkIcon /> },
{ name: 'Resume Builder', path: '/resume-builder', icon: <WorkIcon /> },
{ name: 'Knowledge Explorer', path: '/knowledge-explorer', icon: <WorkIcon /> },
{ name: 'Find a Candidate', path: '/find-a-candidate', icon: <InfoIcon /> },

View File

@@ -15,6 +15,9 @@ import { VectorVisualizerPage } from 'Pages/VectorVisualizerPage';
import { HomePage } from '../Pages/HomePage';
import { BetaPage } from '../Pages/BetaPage';
import { CandidateListingPage } from '../Pages/CandidateListingPage';
import { JobAnalysisPage } from '../Pages/JobAnalysisPage';
import { DemoComponent } from "NewApp/Pages/DemoComponent";
import { GenerateCandidate } from "NewApp/Pages/GenerateCandiate";
const DashboardPage = () => (<BetaPage><Typography variant="h4">Dashboard</Typography></BetaPage>);
const ProfilePage = () => (<BetaPage><Typography variant="h4">Profile</Typography></BetaPage>);
@@ -44,6 +47,8 @@ const getBackstoryDynamicRoutes = (props : BackstoryDynamicRoutesProps, user?: U
<Route key={`${index++}`} path="/resume-builder" element={<ResumeBuilderPage setSnack={setSnack} sessionId={sessionId} submitQuery={submitQuery} />} />,
<Route key={`${index++}`} path="/knowledge-explorer" element={<VectorVisualizerPage setSnack={setSnack} sessionId={sessionId} submitQuery={submitQuery} />} />,
<Route key={`${index++}`} path="/find-a-candidate" element={<CandidateListingPage {...{sessionId, setSnack, submitQuery}} />} />,
<Route key={`${index++}`} path="/job-analysis" element={<JobAnalysisPage />} />,
<Route key={`${index++}`} path="/generate-candidate" element={<GenerateCandidate {...{ sessionId, setSnack, submitQuery }} />} />,
];
if (user === undefined || user === null) {

View File

@@ -14,6 +14,7 @@ const StyledPaper = styled(Paper)(({ theme }) => ({
}));
interface CandidateInfoProps {
sessionId: string;
user?: UserInfo;
sx?: SxProps;
action?: string;
@@ -23,7 +24,8 @@ const CandidateInfo: React.FC<CandidateInfoProps> = (props: CandidateInfoProps)
const { user } = useUser();
const {
sx,
action = ''
action = '',
sessionId,
} = props;
const location = useLocation();
const navigate = useNavigate();
@@ -44,7 +46,7 @@ const CandidateInfo: React.FC<CandidateInfoProps> = (props: CandidateInfoProps)
<Grid container spacing={2}>
<Grid size={{ xs: 12, sm: 2 }} sx={{ display: 'flex', justifyContent: 'center', alignItems: 'center' }}>
<Avatar
src={view.profile_url}
src={view.has_profile ? `/api/u/${view.username}/profile/${sessionId}` : ''}
alt={`${view.full_name}'s profile`}
sx={{
width: 80,

View File

@@ -9,19 +9,20 @@ import CancelIcon from '@mui/icons-material/Cancel';
import { SxProps, Theme } from '@mui/material';
import PropagateLoader from "react-spinners/PropagateLoader";
import { Message, MessageList, BackstoryMessage } from '../../Components/Message';
import { Message, MessageList, BackstoryMessage, MessageRoles } from '../../Components/Message';
import { DeleteConfirmation } from '../../Components/DeleteConfirmation';
import { Query } from '../../Components/ChatQuery';
import { BackstoryTextField, BackstoryTextFieldRef } from '../../Components/BackstoryTextField';
import { BackstoryElementProps } from '../../Components/BackstoryTab';
import { connectionBase } from '../../Global';
import { useUser } from "../Components/UserContext";
import { streamQueryResponse, StreamQueryController } from './streamQueryResponse';
import './Conversation.css';
const loadingMessage: BackstoryMessage = { "role": "status", "content": "Establishing connection with server..." };
type ConversationMode = 'chat' | 'job_description' | 'resume' | 'fact_check';
type ConversationMode = 'chat' | 'job_description' | 'resume' | 'fact_check' | 'persona';
interface ConversationHandle {
submitQuery: (query: Query) => void;
@@ -82,6 +83,7 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>((props: C
const viewableElementRef = useRef<HTMLDivElement>(null);
const backstoryTextRef = useRef<BackstoryTextFieldRef>(null);
const stopRef = useRef(false);
const controllerRef = useRef<StreamQueryController>(null);
// Keep the ref updated whenever items changes
useEffect(() => {
@@ -223,12 +225,12 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>((props: C
const query: Query = {
prompt: value
}
sendQuery(query);
processQuery(query);
};
useImperativeHandle(ref, () => ({
submitQuery: (query: Query) => {
sendQuery(query);
processQuery(query);
},
fetchHistory: () => { return fetchHistory(); }
}));
@@ -266,25 +268,17 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>((props: C
const cancelQuery = () => {
console.log("Stop query");
stopRef.current = true;
if (controllerRef.current) {
controllerRef.current.abort();
}
controllerRef.current = null;
};
const sendQuery = async (query: Query) => {
query.prompt = query.prompt.trim();
// If the request was empty, a default request was provided,
// and there is no prompt for the user, send the default request.
if (!query.prompt && defaultQuery && !prompt) {
query.prompt = defaultQuery.trim();
}
// Do not send an empty request.
if (!query.prompt) {
const processQuery = (query: Query) => {
if (controllerRef.current) {
return;
}
stopRef.current = false;
setNoInteractions(false);
setConversation([
@@ -297,171 +291,67 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>((props: C
}
]);
// Add a small delay to ensure React has time to update the UI
await new Promise(resolve => setTimeout(resolve, 0));
setProcessing(true);
try {
setProcessing(true);
setProcessingMessage(
{ role: 'status', content: 'Submitting request...', disableCopy: true }
);
// Add initial processing message
setProcessingMessage(
{ role: 'status', content: 'Submitting request...', disableCopy: true }
);
// Add a small delay to ensure React has time to update the UI
await new Promise(resolve => setTimeout(resolve, 0));
let data: any = query;
if (type === "job_description") {
data = {
prompt: "",
agent_options: {
job_description: query.prompt,
}
}
}
const response = await fetch(`${connectionBase}/api/${type}/${sessionId}`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Accept': 'application/json',
},
body: JSON.stringify(data)
});
setSnack(`Query sent.`, "info");
if (!response.ok) {
throw new Error(`Server responded with ${response.status}: ${response.statusText}`);
}
if (!response.body) {
throw new Error('Response body is null');
}
let streaming_response = ""
// Set up stream processing with explicit chunking
const reader = response.body.getReader();
const decoder = new TextDecoder();
let buffer = '';
const process_line = async (line: string) => {
let update = JSON.parse(line);
switch (update.status) {
case 'done':
case 'partial':
if (update.status === 'done') stopCountdown();
if (update.status === 'done') setStreamingMessage(undefined);
if (update.status === 'done') setProcessingMessage(undefined);
const backstoryMessage: BackstoryMessage = update;
controllerRef.current = streamQueryResponse({
query,
type,
sessionId,
connectionBase,
onComplete: (msg) => {
console.log(msg);
switch (msg.status) {
case "done":
case "partial":
setConversation([
...conversationRef.current, {
...backstoryMessage,
...msg,
role: 'assistant',
origin: type,
prompt: ['done', 'partial'].includes(update.status) ? update.prompt : '',
content: backstoryMessage.response || "",
expanded: update.status === "done" ? true : false,
expandable: update.status === "done" ? false : true,
prompt: ['done', 'partial'].includes(msg.status || "") ? msg.prompt : '',
content: msg.response || "",
expanded: msg.status === "done" ? true : false,
expandable: msg.status === "done" ? false : true,
}] as MessageList);
// Add a small delay to ensure React has time to update the UI
await new Promise(resolve => setTimeout(resolve, 0));
const metadata = update.metadata;
startCountdown(Math.ceil(msg.remaining_time || 0));
if (msg.status === "done") {
stopCountdown();
setStreamingMessage(undefined);
setProcessingMessage(undefined);
setProcessing(false);
controllerRef.current = null;
}
if (onResponse) {
onResponse(update);
onResponse(msg);
}
break;
case 'error':
case "error":
// Show error
setConversation([
...conversationRef.current, {
...update,
...msg,
role: 'error',
origin: type,
content: update.response || "",
content: msg.response || "",
}] as MessageList);
setProcessingMessage(msg);
setProcessing(false);
stopCountdown();
// Add a small delay to ensure React has time to update the UI
await new Promise(resolve => setTimeout(resolve, 0));
controllerRef.current = null;
break;
default:
// Force an immediate state update based on the message type
// Update processing message with immediate re-render
if (update.status === "streaming") {
streaming_response += update.chunk
setStreamingMessage({ role: update.status, content: streaming_response, disableCopy: true });
} else {
setProcessingMessage({ role: update.status, content: update.response, disableCopy: true });
/* Reset stream on non streaming message */
streaming_response = ""
}
startCountdown(Math.ceil(update.remaining_time));
// Add a small delay to ensure React has time to update the UI
await new Promise(resolve => setTimeout(resolve, 0));
break;
setProcessingMessage({ role: (msg.status || "error") as MessageRoles, content: msg.response || "", disableCopy: true });
break;
}
},
onStreaming: (chunk) => {
setStreamingMessage({ role: "streaming", content: chunk, disableCopy: true });
}
while (!stopRef.current) {
const { done, value } = await reader.read();
if (done) {
break;
}
const chunk = decoder.decode(value, { stream: true });
// Process each complete line immediately
buffer += chunk;
let lines = buffer.split('\n');
buffer = lines.pop() || ''; // Keep incomplete line in buffer
for (const line of lines) {
if (!line.trim()) continue;
try {
await process_line(line);
} catch (e) {
setSnack("Error processing query", "error")
console.error(e);
}
}
}
// Process any remaining buffer content
if (buffer.trim()) {
try {
await process_line(buffer);
} catch (e) {
setSnack("Error processing query", "error")
console.error(e);
}
}
if (stopRef.current) {
await reader.cancel();
setProcessingMessage(undefined);
setStreamingMessage(undefined);
setSnack("Processing cancelled", "warning");
}
stopCountdown();
setProcessing(false);
stopRef.current = false;
} catch (error) {
console.error('Fetch error:', error);
setSnack("Unable to process query", "error");
setProcessingMessage({ role: 'error', content: "Unable to process query", disableCopy: true });
setTimeout(() => {
setProcessingMessage(undefined);
}, 5000);
stopRef.current = false;
setProcessing(false);
stopCountdown();
return;
}
});
};
return (
@@ -479,16 +369,16 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>((props: C
<Box sx={{ p: 1, mt: 0, overflow: "hidden", ...sx }}>
{
filteredConversation.map((message, index) =>
<Message key={index} expanded={message.expanded === undefined ? true : message.expanded} {...{ sendQuery, message, connectionBase, sessionId, setSnack, submitQuery }} />
<Message key={index} expanded={message.expanded === undefined ? true : message.expanded} {...{ sendQuery: processQuery, message, connectionBase, sessionId, setSnack, submitQuery }} />
)
}
{
processingMessage !== undefined &&
<Message {...{ sendQuery, connectionBase, sessionId, setSnack, message: processingMessage, submitQuery }} />
<Message {...{ sendQuery: processQuery, connectionBase, sessionId, setSnack, message: processingMessage, submitQuery }} />
}
{
streamingMessage !== undefined &&
<Message {...{ sendQuery, connectionBase, sessionId, setSnack, message: streamingMessage, submitQuery }} />
<Message {...{ sendQuery: processQuery, connectionBase, sessionId, setSnack, message: streamingMessage, submitQuery }} />
}
<Box sx={{
display: "flex",
@@ -537,7 +427,7 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>((props: C
sx={{ m: 1, gap: 1, flexGrow: 1 }}
variant="contained"
disabled={sessionId === undefined || processingMessage !== undefined}
onClick={() => { sendQuery({ prompt: (backstoryTextRef.current && backstoryTextRef.current.getAndResetValue()) || "" }); }}>
onClick={() => { processQuery({ prompt: (backstoryTextRef.current && backstoryTextRef.current.getAndResetValue()) || "" }); }}>
{actionLabel}<SendIcon />
</Button>
</span>

View File

@@ -0,0 +1,378 @@
import React, { useState, useEffect } from 'react';
import {
Box,
Typography,
Paper,
Accordion,
AccordionSummary,
AccordionDetails,
CircularProgress,
Grid,
Chip,
Divider,
Card,
CardContent,
useTheme,
LinearProgress
} from '@mui/material';
import ExpandMoreIcon from '@mui/icons-material/ExpandMore';
import CheckCircleIcon from '@mui/icons-material/CheckCircle';
import ErrorIcon from '@mui/icons-material/Error';
import PendingIcon from '@mui/icons-material/Pending';
import WarningIcon from '@mui/icons-material/Warning';
// Define TypeScript interfaces for our data structures
interface Citation {
text: string;
source: string;
relevance: number; // 0-100 scale
}
interface SkillMatch {
requirement: string;
status: 'pending' | 'complete' | 'error';
matchScore: number; // 0-100 scale
assessment: string;
citations: Citation[];
}
interface JobAnalysisProps {
jobTitle: string;
candidateName: string;
// This function would connect to your backend and return updates
fetchRequirements: () => Promise<string[]>;
// This function would fetch match data for a specific requirement
fetchMatchForRequirement: (requirement: string) => Promise<SkillMatch>;
}
const JobMatchAnalysis: React.FC<JobAnalysisProps> = ({
jobTitle,
candidateName,
fetchRequirements,
fetchMatchForRequirement
}) => {
const theme = useTheme();
const [requirements, setRequirements] = useState<string[]>([]);
const [skillMatches, setSkillMatches] = useState<SkillMatch[]>([]);
const [loadingRequirements, setLoadingRequirements] = useState<boolean>(true);
const [expanded, setExpanded] = useState<string | false>(false);
const [overallScore, setOverallScore] = useState<number>(0);
// Handle accordion expansion
const handleAccordionChange = (panel: string) => (event: React.SyntheticEvent, isExpanded: boolean) => {
setExpanded(isExpanded ? panel : false);
};
// Fetch initial requirements
useEffect(() => {
const getRequirements = async () => {
try {
const fetchedRequirements = await fetchRequirements();
setRequirements(fetchedRequirements);
// Initialize skill matches with pending status
const initialSkillMatches = fetchedRequirements.map(req => ({
requirement: req,
status: 'pending' as const,
matchScore: 0,
assessment: '',
citations: []
}));
setSkillMatches(initialSkillMatches);
setLoadingRequirements(false);
} catch (error) {
console.error("Error fetching requirements:", error);
setLoadingRequirements(false);
}
};
getRequirements();
}, [fetchRequirements]);
// Fetch match data for each requirement
useEffect(() => {
const fetchMatchData = async () => {
if (requirements.length === 0) return;
// Process requirements one by one
for (let i = 0; i < requirements.length; i++) {
try {
const match = await fetchMatchForRequirement(requirements[i]);
setSkillMatches(prev => {
const updated = [...prev];
updated[i] = match;
return updated;
});
// Update overall score
setSkillMatches(current => {
const completedMatches = current.filter(match => match.status === 'complete');
if (completedMatches.length > 0) {
const newOverallScore = completedMatches.reduce((sum, match) => sum + match.matchScore, 0) / completedMatches.length;
setOverallScore(newOverallScore);
}
return current;
});
} catch (error) {
console.error(`Error fetching match for requirement ${requirements[i]}:`, error);
setSkillMatches(prev => {
const updated = [...prev];
updated[i] = {
...updated[i],
status: 'error',
assessment: 'Failed to analyze this requirement.'
};
return updated;
});
}
}
};
if (!loadingRequirements) {
fetchMatchData();
}
}, [requirements, loadingRequirements, fetchMatchForRequirement]);
// Get color based on match score
const getMatchColor = (score: number): string => {
if (score >= 80) return theme.palette.success.main;
if (score >= 60) return theme.palette.info.main;
if (score >= 40) return theme.palette.warning.main;
return theme.palette.error.main;
};
// Get icon based on status
const getStatusIcon = (status: string, score: number) => {
if (status === 'pending') return <PendingIcon />;
if (status === 'error') return <ErrorIcon color="error" />;
if (score >= 70) return <CheckCircleIcon color="success" />;
if (score >= 40) return <WarningIcon color="warning" />;
return <ErrorIcon color="error" />;
};
return (
<Box sx={{ maxWidth: 1200, margin: '0 auto', p: 2 }}>
<Paper elevation={3} sx={{ p: 3, mb: 4 }}>
<Grid container spacing={2}>
<Grid size={{ xs: 12 }} sx={{ textAlign: 'center', mb: 2 }}>
<Typography variant="h4" component="h1" gutterBottom>
Job Match Analysis
</Typography>
<Divider sx={{ mb: 2 }} />
</Grid>
<Grid size={{ xs: 12, md: 6 }}>
<Typography variant="h6" component="h2">
Job: {jobTitle}
</Typography>
</Grid>
<Grid size={{ xs: 12, md: 6 }}>
<Typography variant="h6" component="h2">
Candidate: {candidateName}
</Typography>
</Grid>
<Grid size={{ xs: 12 }} sx={{ mt: 2 }}>
<Box sx={{ display: 'flex', alignItems: 'center', mb: 2 }}>
<Typography variant="h5" component="h2" sx={{ mr: 2 }}>
Overall Match:
</Typography>
<Box sx={{
position: 'relative',
display: 'inline-flex',
mr: 2
}}>
<CircularProgress
variant="determinate"
value={overallScore}
size={60}
thickness={5}
sx={{
color: getMatchColor(overallScore),
}}
/>
<Box
sx={{
top: 0,
left: 0,
bottom: 0,
right: 0,
position: 'absolute',
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
}}
>
<Typography variant="caption" component="div" sx={{ fontWeight: 'bold' }}>
{`${Math.round(overallScore)}%`}
</Typography>
</Box>
</Box>
<Chip
label={
overallScore >= 80 ? "Excellent Match" :
overallScore >= 60 ? "Good Match" :
overallScore >= 40 ? "Partial Match" : "Low Match"
}
sx={{
bgcolor: getMatchColor(overallScore),
color: 'white',
fontWeight: 'bold'
}}
/>
</Box>
</Grid>
</Grid>
</Paper>
{loadingRequirements ? (
<Box sx={{ display: 'flex', justifyContent: 'center', p: 4 }}>
<CircularProgress />
<Typography variant="h6" sx={{ ml: 2 }}>
Analyzing job requirements...
</Typography>
</Box>
) : (
<Box>
<Typography variant="h5" component="h2" gutterBottom>
Requirements Analysis
</Typography>
{skillMatches.map((match, index) => (
<Accordion
key={index}
expanded={expanded === `panel${index}`}
onChange={handleAccordionChange(`panel${index}`)}
sx={{
mb: 2,
border: '1px solid',
borderColor: match.status === 'complete'
? getMatchColor(match.matchScore)
: theme.palette.divider
}}
>
<AccordionSummary
expandIcon={<ExpandMoreIcon />}
aria-controls={`panel${index}bh-content`}
id={`panel${index}bh-header`}
sx={{
bgcolor: match.status === 'complete'
? `${getMatchColor(match.matchScore)}22` // Add transparency
: 'inherit'
}}
>
<Box sx={{
display: 'flex',
alignItems: 'center',
width: '100%',
justifyContent: 'space-between'
}}>
<Box sx={{ display: 'flex', alignItems: 'center' }}>
{getStatusIcon(match.status, match.matchScore)}
<Typography sx={{ ml: 1, fontWeight: 'medium' }}>
{match.requirement}
</Typography>
</Box>
{match.status === 'complete' ? (
<Chip
label={`${match.matchScore}% Match`}
size="small"
sx={{
bgcolor: getMatchColor(match.matchScore),
color: 'white',
minWidth: 90
}}
/>
) : match.status === 'pending' ? (
<Chip
label="Analyzing..."
size="small"
sx={{ bgcolor: theme.palette.grey[400], color: 'white', minWidth: 90 }}
/>
) : (
<Chip
label="Error"
size="small"
sx={{ bgcolor: theme.palette.error.main, color: 'white', minWidth: 90 }}
/>
)}
</Box>
</AccordionSummary>
<AccordionDetails>
{match.status === 'pending' ? (
<Box sx={{ width: '100%', p: 2 }}>
<LinearProgress />
<Typography sx={{ mt: 2 }}>
Analyzing candidate's match for this requirement...
</Typography>
</Box>
) : match.status === 'error' ? (
<Typography color="error">
{match.assessment || "An error occurred while analyzing this requirement."}
</Typography>
) : (
<Box>
<Typography variant="h6" gutterBottom>
Assessment:
</Typography>
<Typography paragraph sx={{ mb: 3 }}>
{match.assessment}
</Typography>
<Typography variant="h6" gutterBottom>
Supporting Evidence:
</Typography>
{match.citations.length > 0 ? (
match.citations.map((citation, citIndex) => (
<Card
key={citIndex}
variant="outlined"
sx={{
mb: 2,
borderLeft: '4px solid',
borderColor: theme.palette.primary.main,
}}
>
<CardContent>
<Typography variant="body1" component="div" sx={{ mb: 1, fontStyle: 'italic' }}>
"{citation.text}"
</Typography>
<Box sx={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center' }}>
<Typography variant="caption" color="text.secondary">
Source: {citation.source}
</Typography>
<Chip
size="small"
label={`Relevance: ${citation.relevance}%`}
sx={{
bgcolor: theme.palette.grey[200],
}}
/>
</Box>
</CardContent>
</Card>
))
) : (
<Typography color="text.secondary">
No specific evidence found in candidate's profile.
</Typography>
)}
</Box>
)}
</AccordionDetails>
</Accordion>
))}
</Box>
)}
</Box>
);
};
export { JobMatchAnalysis };

View File

@@ -4,9 +4,13 @@ import { SetSnackType } from '../../Components/Snack';
import { connectionBase } from '../../Global';
// Define the UserInfo interface for type safety
interface UserQuestion {
question: string;
tunables?: Tunables;
};
interface UserInfo {
type: 'candidate' | 'employer' | 'guest';
profile_url: string;
description: string;
rag_content_size: number;
username: string;
@@ -14,11 +18,13 @@ interface UserInfo {
last_name: string;
full_name: string;
contact_info: Record<string, string>;
questions: [{
question: string;
tunables?: Tunables
}],
isAuthenticated: boolean
questions: UserQuestion[],
isAuthenticated: boolean,
has_profile: boolean,
// Fields used in AI generated personas
age?: number,
ethnicity?: string,
gender?: string,
};
type UserContextType = {
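
Elsewhere in this commit, CandidateInfo derives the avatar URL from has_profile and username instead of the removed profile_url field; a one-line sketch of that derivation (avatarUrl is an illustrative name, not part of the commit):

// Sketch: profile images are now served per-session by the backend
// rather than read from a static profile_url on the user record.
const avatarUrl = (user: UserInfo, sessionId: string): string =>
    user.has_profile ? `/api/u/${user.username}/profile/${sessionId}` : '';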

View File

@@ -0,0 +1,161 @@
import { BackstoryMessage } from '../../Components/Message';
import { Query } from '../../Components/ChatQuery';
type StreamQueryOptions = {
query: Query;
type: string;
sessionId: string;
connectionBase: string;
onComplete: (message: BackstoryMessage) => void;
onStreaming?: (message: string) => void;
};
type StreamQueryController = {
abort: () => void
};
const streamQueryResponse = (options: StreamQueryOptions) => {
const {
query,
type,
sessionId,
connectionBase,
onComplete,
onStreaming,
} = options;
const abortController = new AbortController();
const run = async () => {
query.prompt = query.prompt.trim();
if (!query.prompt) return;
let data: any = query;
if (type === "job_description") {
data = {
prompt: "",
agent_options: {
job_description: query.prompt,
},
};
}
try {
const response = await fetch(`${connectionBase}/api/${type}/${sessionId}`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Accept': 'application/json',
},
body: JSON.stringify(data),
signal: abortController.signal,
});
if (!response.ok) {
throw new Error(`Server responded with ${response.status}: ${response.statusText}`);
}
if (!response.body) {
throw new Error('Response body is null');
}
const reader = response.body.getReader();
const decoder = new TextDecoder();
let buffer = '';
let streaming_response = '';
const processLine = async (line: string) => {
const update = JSON.parse(line);
switch (update.status) {
case "streaming":
streaming_response += update.chunk;
onStreaming?.(streaming_response);
break;
case 'error':
const errorMessage: BackstoryMessage = {
...update,
role: 'error',
origin: type,
content: update.response ?? '',
};
onComplete(errorMessage);
break;
default:
const message: BackstoryMessage = {
...update,
role: 'assistant',
origin: type,
prompt: update.prompt ?? '',
content: update.response ?? '',
expanded: update.status === 'done',
expandable: update.status !== 'done',
};
streaming_response = '';
onComplete(message);
break;
}
};
while (true) {
const { done, value } = await reader.read();
if (done) break;
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split('\n');
buffer = lines.pop() || '';
for (const line of lines) {
if (!line.trim()) continue;
try {
await processLine(line);
} catch (e) {
console.error('Error processing line:', line, e);
}
}
}
if (buffer.trim()) {
try {
await processLine(buffer);
} catch (e) {
console.error('Error processing remaining buffer:', e);
}
}
} catch (error) {
if ((error as any).name === 'AbortError') {
console.log('Query aborted');
onComplete({
role: 'error',
origin: type,
content: 'Query was cancelled.',
response: "" + error,
status: 'error',
} as BackstoryMessage);
} else {
console.error('Fetch error:', error);
onComplete({
role: 'error',
origin: type,
content: 'Unable to process query',
response: "" + error,
status: 'error',
} as BackstoryMessage);
}
}
};
run();
return {
abort: () => abortController.abort(),
};
};
export type {
StreamQueryController
};
export { streamQueryResponse };
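
A usage sketch for the helper above, roughly as Conversation.tsx and GenerateCandidate wire it up in this commit; the prompt, session id, and log handlers are placeholders:

// Sketch: start a streamed query and keep the controller so a
// Cancel button can abort the in-flight request.
const controller: StreamQueryController = streamQueryResponse({
    query: { prompt: 'Tell me about this candidate.' }, // placeholder prompt
    type: 'chat',
    sessionId: 'session-123', // placeholder session id
    connectionBase,
    onStreaming: (partial) => console.log('accumulated text:', partial),
    onComplete: (msg) => console.log(msg.status, msg.response),
});
// Later, e.g. from a Cancel button:
controller.abort();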

View File

@@ -70,7 +70,7 @@ const CandidateListingPage = (props: BackstoryPageProps) => {
}}
sx={{ cursor: "pointer" }}
>
<CandidateInfo sx={{ "cursor": "pointer", "&:hover": { border: "2px solid orange" }, border: "2px solid transparent"}} user={u}/>
<CandidateInfo sessionId={sessionId} sx={{ "cursor": "pointer", "&:hover": { border: "2px solid orange" }, border: "2px solid transparent" }} user={u} />
</Box>
)}
</Box>

View File

@@ -46,7 +46,7 @@ const ChatPage = forwardRef<ConversationHandle, BackstoryPageProps>((props: Back
}
return (
<Box>
<CandidateInfo action="Chat with AI about " />
<CandidateInfo sessionId={sessionId} action="Chat with Backstory AI about " />
<Conversation
ref={ref}
{...{

View File

@@ -0,0 +1,153 @@
import React from 'react';
import {JobMatchAnalysis} from '../Components/JobMatchAnalysis';
// Mock data and functions to simulate your backend
const mockRequirements = [
"5+ years of React development experience",
"Strong TypeScript skills",
"Experience with RESTful APIs",
"Knowledge of state management solutions (Redux, Context API)",
"Experience with CI/CD pipelines",
"Cloud platform experience (AWS, Azure, GCP)"
];
// Simulates fetching requirements with a delay
const mockFetchRequirements = async (): Promise<string[]> => {
return new Promise((resolve) => {
setTimeout(() => {
resolve(mockRequirements);
}, 1500); // Simulate network delay
});
};
// Simulates fetching match data for a requirement with varying delays
const mockFetchMatchForRequirement = async (requirement: string): Promise<any> => {
// Create different mock responses based on the requirement
const mockResponses: Record<string, any> = {
"5+ years of React development experience": {
requirement: "5+ years of React development experience",
status: "complete",
matchScore: 85,
assessment: "The candidate demonstrates extensive React experience spanning over 6 years, with a strong portfolio of complex applications and deep understanding of React's component lifecycle and hooks.",
citations: [
{
text: "Led frontend development team of 5 engineers to rebuild our customer portal using React and TypeScript, resulting in 40% improved performance and 30% reduction in bugs.",
source: "Resume, Work Experience",
relevance: 95
},
{
text: "Developed and maintained reusable React component library used across 12 different products within the organization.",
source: "Resume, Work Experience",
relevance: 90
},
{
text: "I've been working with React since 2017, building everything from small widgets to enterprise applications.",
source: "Cover Letter",
relevance: 85
}
]
},
"Strong TypeScript skills": {
requirement: "Strong TypeScript skills",
status: "complete",
matchScore: 90,
assessment: "The candidate shows excellent TypeScript proficiency through their work history and personal projects. They have implemented complex type systems and demonstrate an understanding of advanced TypeScript features.",
citations: [
{
text: "Converted a legacy JavaScript codebase of 100,000+ lines to TypeScript, implementing strict type checking and reducing runtime errors by 70%.",
source: "Resume, Projects",
relevance: 98
},
{
text: "Created comprehensive TypeScript interfaces for our GraphQL API, ensuring type safety across the entire application stack.",
source: "Resume, Technical Skills",
relevance: 95
}
]
},
"Experience with RESTful APIs": {
requirement: "Experience with RESTful APIs",
status: "complete",
matchScore: 75,
assessment: "The candidate has good experience with RESTful APIs, having both consumed and designed them. They understand REST principles but have less documented experience with API versioning and caching strategies.",
citations: [
{
text: "Designed and implemented a RESTful API serving over 1M requests daily with a focus on performance and scalability.",
source: "Resume, Technical Projects",
relevance: 85
},
{
text: "Worked extensively with third-party APIs including Stripe, Twilio, and Salesforce to integrate payment processing and communication features.",
source: "Resume, Work Experience",
relevance: 70
}
]
},
"Knowledge of state management solutions (Redux, Context API)": {
requirement: "Knowledge of state management solutions (Redux, Context API)",
status: "complete",
matchScore: 65,
assessment: "The candidate has moderate experience with state management, primarily using Redux. There is less evidence of Context API usage, which could indicate a knowledge gap in more modern React state management approaches.",
citations: [
{
text: "Implemented Redux for global state management in an e-commerce application, handling complex state logic for cart, user preferences, and product filtering.",
source: "Resume, Skills",
relevance: 80
},
{
text: "My experience includes working with state management libraries like Redux and MobX.",
source: "Cover Letter",
relevance: 60
}
]
},
"Experience with CI/CD pipelines": {
requirement: "Experience with CI/CD pipelines",
status: "complete",
matchScore: 40,
assessment: "The candidate shows limited experience with CI/CD pipelines. While they mention some exposure to Jenkins and GitLab CI, there is insufficient evidence of setting up or maintaining comprehensive CI/CD workflows.",
citations: [
{
text: "Familiar with CI/CD tools including Jenkins and GitLab CI.",
source: "Resume, Skills",
relevance: 40
}
]
},
"Cloud platform experience (AWS, Azure, GCP)": {
requirement: "Cloud platform experience (AWS, Azure, GCP)",
status: "complete",
matchScore: 30,
assessment: "The candidate demonstrates minimal experience with cloud platforms. There is a brief mention of AWS S3 and Lambda, but no substantial evidence of deeper cloud architecture knowledge or experience with Azure or GCP.",
citations: [
{
text: "Used AWS S3 for file storage and Lambda for image processing in a photo sharing application.",
source: "Resume, Projects",
relevance: 35
}
]
}
};
// Return a promise that resolves with the mock data after a delay
return new Promise((resolve) => {
// Different requirements resolve at different speeds to simulate real-world analysis
const delay = Math.random() * 5000 + 2000; // 2-7 seconds
setTimeout(() => {
resolve(mockResponses[requirement]);
}, delay);
});
};
const DemoComponent: React.FC = () => {
return (
<JobMatchAnalysis
jobTitle="Senior Frontend Developer"
candidateName="Alex Johnson"
fetchRequirements={mockFetchRequirements}
fetchMatchForRequirement={mockFetchMatchForRequirement}
/>
);
};
export { DemoComponent };

View File

@@ -0,0 +1,240 @@
import React, { useEffect, useState, useRef } from 'react';
import Box from '@mui/material/Box';
import Tooltip from '@mui/material/Tooltip';
import Button from '@mui/material/Button';
import IconButton from '@mui/material/IconButton';
import CancelIcon from '@mui/icons-material/Cancel';
import SendIcon from '@mui/icons-material/Send';
import PropagateLoader from 'react-spinners/PropagateLoader';
import { CandidateInfo } from '../Components/CandidateInfo';
import { Query } from '../../Components/ChatQuery'
import { streamQueryResponse, StreamQueryController } from '../Components/streamQueryResponse';
import { connectionBase } from 'Global';
import { UserInfo } from '../Components/UserContext';
import { BackstoryElementProps } from 'Components/BackstoryTab';
import { BackstoryTextField, BackstoryTextFieldRef } from 'Components/BackstoryTextField';
import { jsonrepair } from 'jsonrepair';
import { StyledMarkdown } from 'Components/StyledMarkdown';
import { Scrollable } from 'Components/Scrollable';
import { useForkRef } from '@mui/material';
const emptyUser : UserInfo = {
type: 'candidate',
description: "[blank]",
rag_content_size: 0,
username: "[blank]",
first_name: "[blank]",
last_name: "[blank]",
full_name: "[blank] [blank]",
contact_info: {},
questions: [],
isAuthenticated: false,
has_profile: false
};
const GenerateCandidate = (props: BackstoryElementProps) => {
const {sessionId, setSnack, submitQuery} = props;
const [streaming, setStreaming] = useState<string>('');
const [processing, setProcessing] = useState<boolean>(false);
const [user, setUser] = useState<UserInfo>(emptyUser);
const controllerRef = useRef<StreamQueryController>(null);
const backstoryTextRef = useRef<BackstoryTextFieldRef>(null);
const promptRef = useRef<string>(null);
const stateRef = useRef<number>(0); /* Generating persona */
const userRef = useRef<UserInfo>(user);
const [prompt, setPrompt] = useState<string>('');
const [resume, setResume] = useState<string>('');
const processQuery = (query: Query) => {
if (controllerRef.current) {
return;
}
setPrompt(query.prompt);
promptRef.current = query.prompt;
stateRef.current = 0;
setUser(emptyUser);
setStreaming('');
setResume('');
setProcessing(true);
controllerRef.current = streamQueryResponse({
query,
type: "persona",
sessionId,
connectionBase,
onComplete: (msg) => {
console.log({ msg, state: stateRef.current, prompt: promptRef.current || '' });
switch (msg.status) {
case "partial":
case "done":
switch (stateRef.current) {
case 0: /* Generating persona */
let partialUser = JSON.parse(jsonrepair((msg.response || '').trim()));
if (!partialUser.full_name) {
partialUser.full_name = `${partialUser.first_name} ${partialUser.last_name}`;
}
console.log(partialUser);
setUser(partialUser);
stateRef.current = 1 /* Generating resume */
break;
case 1: /* Generating resume */
stateRef.current = 2 /* RAG generation */
break;
case 2: /* RAG generation */
stateRef.current = 3 /* Image generation */
break;
case 3: /* Generating image */
let imageGeneration = JSON.parse(jsonrepair((msg.response || '').trim()));
console.log(imageGeneration);
if (imageGeneration >= 100) {
setUser({...userRef.current});
} else {
setPrompt(imageGeneration.status);
}
stateRef.current = 3 /* ... */
}
if (msg.status === "done") {
setProcessing(false);
controllerRef.current = null;
stateRef.current = 0;
}
break;
case "thinking":
setPrompt(msg.response || '');
break;
case "error":
console.log(`Error generating persona: ${msg.response}`);
setSnack(msg.response || "", "error");
setProcessing(false);
setUser({...userRef.current});
controllerRef.current = null;
stateRef.current = 0;
break;
}
},
onStreaming: (chunk) => {
setStreaming(chunk);
}
});
};
const cancelQuery = () => {
if (controllerRef.current) {
controllerRef.current.abort();
controllerRef.current = null;
stateRef.current = 0;
setProcessing(false);
}
}
useEffect(() => {
promptRef.current = prompt;
}, [prompt]);
useEffect(() => {
userRef.current = user;
}, [user]);
useEffect(() => {
if (streaming.trim().length === 0) {
return;
}
try {
switch (stateRef.current) {
case 0: /* Generating persona */
const partialUser = {...emptyUser, ...JSON.parse(jsonrepair(`${streaming.trim()}...`))};
if (!partialUser.full_name) {
partialUser.full_name = `${partialUser.first_name} ${partialUser.last_name}`;
}
setUser(partialUser);
break;
case 1: /* Generating resume */
setResume(streaming);
break;
case 2: /* RAG streaming */
break;
case 3: /* Image streaming */
break;
}
} catch {
}
}, [streaming]);
if (!sessionId) {
return <></>;
}
const onEnter = (value: string) => {
if (processing) {
return;
}
const query: Query = {
prompt: value
}
processQuery(query);
};
return (<>
{ user && <CandidateInfo sessionId={sessionId} user={user}/> }
{ resume !== '' && <Scrollable sx={{maxHeight: "20vh"}}><StyledMarkdown {...{content: resume, setSnack, sessionId, submitQuery}}/></Scrollable> }
{processing && <Box sx={{
display: "flex",
flexDirection: "column",
alignItems: "center",
justifyContent: "center",
m: 2,
}}>
<Box sx={{flexDirection: "row"}}><Box>Generating{' '}
{stateRef.current === 0 && "persona"}
{stateRef.current === 1 && "resume"}
{stateRef.current === 2 && "RAG"}
{stateRef.current === 3 && "profile image"}
:</Box><Box sx={{fontWeight: "bold"}}>{prompt}</Box></Box>
<PropagateLoader
size="10px"
loading={processing}
aria-label="Loading Spinner"
data-testid="loader"
/>
</Box> }
<BackstoryTextField
ref={backstoryTextRef}
disabled={processing}
onEnter={onEnter}
placeholder='Specify any characteristics you would like the persona to have. For example, "This person likes yo-yos."'
/>
<Box sx={{ display: "flex", justifyContent: "center", flexDirection: "row" }}>
<Tooltip title={"Send"}>
<span style={{ display: "flex", flexGrow: 1 }}>
<Button
sx={{ m: 1, gap: 1, flexGrow: 1 }}
variant="contained"
disabled={sessionId === undefined || processing}
onClick={() => { processQuery({ prompt: (backstoryTextRef.current && backstoryTextRef.current.getAndResetValue()) || "" }); }}>
Send<SendIcon />
</Button>
</span>
</Tooltip>
<Tooltip title="Cancel">
<span style={{ display: "flex" }}> { /* This span is used to wrap the IconButton to ensure Tooltip works even when disabled */}
<IconButton
aria-label="cancel"
onClick={() => { cancelQuery(); }}
sx={{ display: "flex", margin: 'auto 0px' }}
size="large"
edge="start"
disabled={controllerRef.current === null || !sessionId || processing === false}
>
<CancelIcon />
</IconButton>
</span>
</Tooltip>
</Box>
</>);
};
export {
GenerateCandidate
};
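
GenerateCandidate tracks its phase with bare numbers in stateRef; a hedged refactoring sketch (not part of this commit) naming the stages it cycles through:

// Sketch: named stages for the numeric stateRef values used above.
enum GenerationStage {
    Persona = 0, // streaming the persona JSON
    Resume = 1,  // streaming the resume markdown
    Rag = 2,     // generating RAG content
    Image = 3,   // generating the profile image
}
// e.g. stateRef.current = GenerationStage.Resume;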

View File

@@ -0,0 +1,677 @@
import React, { useState, useEffect } from 'react';
import {
Box,
Stepper,
Step,
StepLabel,
Button,
Typography,
Paper,
TextField,
Grid,
Card,
CardContent,
CardActionArea,
Avatar,
Divider,
CircularProgress,
Container,
useTheme,
Snackbar,
Alert,
Dialog,
DialogTitle,
DialogContent,
DialogContentText,
DialogActions,
InputAdornment,
IconButton
} from '@mui/material';
import SearchIcon from '@mui/icons-material/Search';
import PersonIcon from '@mui/icons-material/Person';
import WorkIcon from '@mui/icons-material/Work';
import AssessmentIcon from '@mui/icons-material/Assessment';
import DescriptionIcon from '@mui/icons-material/Description';
import FileUploadIcon from '@mui/icons-material/FileUpload';
import {JobMatchAnalysis} from '../Components/JobMatchAnalysis';
// Mock types for our application
interface Candidate {
id: string;
name: string;
title: string;
location: string;
email: string;
phone: string;
photoUrl?: string;
resume?: string;
}
interface User {
id: string;
name: string;
company: string;
role: string;
}
// Mock hook for getting the current user
const useUser = (): { user: User | null, loading: boolean } => {
// In a real app, this would check auth state and get user info
const [loading, setLoading] = useState(true);
const [user, setUser] = useState<User | null>(null);
useEffect(() => {
// Simulate fetching user data
setTimeout(() => {
setUser({
id: 'emp123',
name: 'Sarah Thompson',
company: 'Tech Innovations Inc.',
role: 'HR Manager'
});
setLoading(false);
}, 800);
}, []);
return { user, loading };
};
// Mock API for fetching candidates
const fetchCandidates = async (searchQuery: string = ''): Promise<Candidate[]> => {
// Simulate API delay
await new Promise(resolve => setTimeout(resolve, 1000));
const mockCandidates: Candidate[] = [
{
id: 'c1',
name: 'Alex Johnson',
title: 'Senior Frontend Developer',
location: 'Seattle, WA',
email: 'alex.johnson@example.com',
phone: '(555) 123-4567',
photoUrl: 'https://i.pravatar.cc/150?img=11'
},
{
id: 'c2',
name: 'Morgan Williams',
title: 'Full Stack Engineer',
location: 'Portland, OR',
email: 'morgan.w@example.com',
phone: '(555) 234-5678',
photoUrl: 'https://i.pravatar.cc/150?img=12'
},
{
id: 'c3',
name: 'Jamie Garcia',
title: 'DevOps Specialist',
location: 'San Francisco, CA',
email: 'jamie.g@example.com',
phone: '(555) 345-6789',
photoUrl: 'https://i.pravatar.cc/150?img=13'
},
{
id: 'c4',
name: 'Taylor Chen',
title: 'Backend Developer',
location: 'Austin, TX',
email: 'taylor.c@example.com',
phone: '(555) 456-7890',
photoUrl: 'https://i.pravatar.cc/150?img=14'
},
{
id: 'c5',
name: 'Jordan Smith',
title: 'UI/UX Developer',
location: 'Chicago, IL',
email: 'jordan.s@example.com',
phone: '(555) 567-8901',
photoUrl: 'https://i.pravatar.cc/150?img=15'
}
];
if (!searchQuery) return mockCandidates;
// Filter candidates based on search query
return mockCandidates.filter(candidate =>
candidate.name.toLowerCase().includes(searchQuery.toLowerCase()) ||
candidate.title.toLowerCase().includes(searchQuery.toLowerCase()) ||
candidate.location.toLowerCase().includes(searchQuery.toLowerCase())
);
};
// Main component
const JobAnalysisPage: React.FC = () => {
const theme = useTheme();
const { user, loading: userLoading } = useUser();
// State management
const [activeStep, setActiveStep] = useState(0);
const [candidates, setCandidates] = useState<Candidate[]>([]);
const [selectedCandidate, setSelectedCandidate] = useState<Candidate | null>(null);
const [searchQuery, setSearchQuery] = useState('');
const [loadingCandidates, setLoadingCandidates] = useState(false);
const [jobDescription, setJobDescription] = useState('');
const [jobTitle, setJobTitle] = useState('');
const [jobLocation, setJobLocation] = useState('');
const [analysisStarted, setAnalysisStarted] = useState(false);
const [error, setError] = useState<string | null>(null);
const [openUploadDialog, setOpenUploadDialog] = useState(false);
// Steps in our process
const steps = [
{ label: 'Select Candidate', icon: <PersonIcon /> },
{ label: 'Job Description', icon: <WorkIcon /> },
{ label: 'View Analysis', icon: <AssessmentIcon /> }
];
// Load initial candidates
useEffect(() => {
const loadCandidates = async () => {
setLoadingCandidates(true);
try {
const data = await fetchCandidates();
setCandidates(data);
} catch (err) {
setError('Failed to load candidates. Please try again.');
} finally {
setLoadingCandidates(false);
}
};
if (user) {
loadCandidates();
}
}, [user]);
// Handler for candidate search
const handleSearch = async () => {
setLoadingCandidates(true);
try {
const data = await fetchCandidates(searchQuery);
setCandidates(data);
} catch (err) {
setError('Search failed. Please try again.');
} finally {
setLoadingCandidates(false);
}
};
// Mock handlers for our analysis APIs
const fetchRequirements = async (): Promise<string[]> => {
// Simulates extracting requirements from the job description
await new Promise(resolve => setTimeout(resolve, 2000));
// This would normally parse the job description to extract requirements
const mockRequirements = [
"5+ years of React development experience",
"Strong TypeScript skills",
"Experience with RESTful APIs",
"Knowledge of state management solutions (Redux, Context API)",
"Experience with CI/CD pipelines",
"Cloud platform experience (AWS, Azure, GCP)"
];
return mockRequirements;
};
const fetchMatchForRequirement = async (requirement: string): Promise<any> => {
// Create different mock responses based on the requirement
const mockResponses: Record<string, any> = {
"5+ years of React development experience": {
requirement: "5+ years of React development experience",
status: "complete",
matchScore: 85,
assessment: "The candidate demonstrates extensive React experience spanning over 6 years, with a strong portfolio of complex applications and deep understanding of React's component lifecycle and hooks.",
citations: [
{
text: "Led frontend development team of 5 engineers to rebuild our customer portal using React and TypeScript, resulting in 40% improved performance and 30% reduction in bugs.",
source: "Resume, Work Experience",
relevance: 95
},
{
text: "Developed and maintained reusable React component library used across 12 different products within the organization.",
source: "Resume, Work Experience",
relevance: 90
},
{
text: "I've been working with React since 2017, building everything from small widgets to enterprise applications.",
source: "Cover Letter",
relevance: 85
}
]
},
"Strong TypeScript skills": {
requirement: "Strong TypeScript skills",
status: "complete",
matchScore: 90,
assessment: "The candidate shows excellent TypeScript proficiency through their work history and personal projects. They have implemented complex type systems and demonstrate an understanding of advanced TypeScript features.",
citations: [
{
text: "Converted a legacy JavaScript codebase of 100,000+ lines to TypeScript, implementing strict type checking and reducing runtime errors by 70%.",
source: "Resume, Projects",
relevance: 98
},
{
text: "Created comprehensive TypeScript interfaces for our GraphQL API, ensuring type safety across the entire application stack.",
source: "Resume, Technical Skills",
relevance: 95
}
]
},
"Experience with RESTful APIs": {
requirement: "Experience with RESTful APIs",
status: "complete",
matchScore: 75,
assessment: "The candidate has good experience with RESTful APIs, having both consumed and designed them. They understand REST principles but have less documented experience with API versioning and caching strategies.",
citations: [
{
text: "Designed and implemented a RESTful API serving over 1M requests daily with a focus on performance and scalability.",
source: "Resume, Technical Projects",
relevance: 85
},
{
text: "Worked extensively with third-party APIs including Stripe, Twilio, and Salesforce to integrate payment processing and communication features.",
source: "Resume, Work Experience",
relevance: 70
}
]
},
"Knowledge of state management solutions (Redux, Context API)": {
requirement: "Knowledge of state management solutions (Redux, Context API)",
status: "complete",
matchScore: 65,
assessment: "The candidate has moderate experience with state management, primarily using Redux. There is less evidence of Context API usage, which could indicate a knowledge gap in more modern React state management approaches.",
citations: [
{
text: "Implemented Redux for global state management in an e-commerce application, handling complex state logic for cart, user preferences, and product filtering.",
source: "Resume, Skills",
relevance: 80
},
{
text: "My experience includes working with state management libraries like Redux and MobX.",
source: "Cover Letter",
relevance: 60
}
]
},
"Experience with CI/CD pipelines": {
requirement: "Experience with CI/CD pipelines",
status: "complete",
matchScore: 40,
assessment: "The candidate shows limited experience with CI/CD pipelines. While they mention some exposure to Jenkins and GitLab CI, there is insufficient evidence of setting up or maintaining comprehensive CI/CD workflows.",
citations: [
{
text: "Familiar with CI/CD tools including Jenkins and GitLab CI.",
source: "Resume, Skills",
relevance: 40
}
]
},
"Cloud platform experience (AWS, Azure, GCP)": {
requirement: "Cloud platform experience (AWS, Azure, GCP)",
status: "complete",
matchScore: 30,
assessment: "The candidate demonstrates minimal experience with cloud platforms. There is a brief mention of AWS S3 and Lambda, but no substantial evidence of deeper cloud architecture knowledge or experience with Azure or GCP.",
citations: [
{
text: "Used AWS S3 for file storage and Lambda for image processing in a photo sharing application.",
source: "Resume, Projects",
relevance: 35
}
]
}
};
// Return a promise that resolves with the mock data after a delay
return new Promise((resolve) => {
// Different requirements resolve at different speeds to simulate real-world analysis
const delay = Math.random() * 5000 + 2000; // 2-7 seconds
setTimeout(() => {
resolve(mockResponses[requirement]);
}, delay);
});
};
// Navigation handlers
const handleNext = () => {
if (activeStep === 0 && !selectedCandidate) {
setError('Please select a candidate before continuing.');
return;
}
if (activeStep === 1 && (!jobTitle || !jobDescription)) {
setError('Please provide both job title and description before continuing.');
return;
}
if (activeStep === 2) {
setAnalysisStarted(true);
}
setActiveStep((prevActiveStep) => prevActiveStep + 1);
};
const handleBack = () => {
setActiveStep((prevActiveStep) => prevActiveStep - 1);
};
const handleReset = () => {
setActiveStep(0);
setSelectedCandidate(null);
setJobDescription('');
setJobTitle('');
setJobLocation('');
setAnalysisStarted(false);
};
// Render function for the candidate selection step
const renderCandidateSelection = () => (
<Paper elevation={3} sx={{ p: 3, mt: 3, mb: 4, borderRadius: 2 }}>
<Typography variant="h5" gutterBottom>
Select a Candidate
</Typography>
<Box sx={{ mb: 3, display: 'flex' }}>
<TextField
fullWidth
variant="outlined"
placeholder="Search candidates by name, title, or location"
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
InputProps={{
endAdornment: (
<InputAdornment position="end">
<IconButton onClick={handleSearch} edge="end">
<SearchIcon />
</IconButton>
</InputAdornment>
),
}}
sx={{ mr: 2 }}
/>
</Box>
{loadingCandidates ? (
<Box sx={{ display: 'flex', justifyContent: 'center', p: 4 }}>
<CircularProgress />
</Box>
) : candidates.length === 0 ? (
<Typography>No candidates found. Please adjust your search criteria.</Typography>
) : (
<Grid container spacing={3}>
{candidates.map((candidate) => (
<Grid size={{ xs: 12, sm: 6, md: 4 }} key={candidate.id}>
<Card
elevation={selectedCandidate?.id === candidate.id ? 8 : 1}
sx={{
height: '100%',
borderColor: selectedCandidate?.id === candidate.id ? theme.palette.primary.main : 'transparent',
borderWidth: 2,
borderStyle: 'solid',
transition: 'all 0.3s ease'
}}
>
<CardActionArea
onClick={() => setSelectedCandidate(candidate)}
sx={{ height: '100%', display: 'flex', flexDirection: 'column', alignItems: 'stretch' }}
>
<CardContent sx={{ flexGrow: 1, p: 3 }}>
<Box sx={{ display: 'flex', mb: 2, alignItems: 'center' }}>
<Avatar
src={candidate.photoUrl}
alt={candidate.name}
sx={{ width: 64, height: 64, mr: 2 }}
/>
<Box>
<Typography variant="h6" component="div">
{candidate.name}
</Typography>
<Typography variant="body2" color="text.secondary">
{candidate.title}
</Typography>
</Box>
</Box>
<Divider sx={{ my: 2 }} />
<Typography variant="body2" sx={{ mb: 1 }}>
<strong>Location:</strong> {candidate.location}
</Typography>
<Typography variant="body2" sx={{ mb: 1 }}>
<strong>Email:</strong> {candidate.email}
</Typography>
<Typography variant="body2">
<strong>Phone:</strong> {candidate.phone}
</Typography>
</CardContent>
</CardActionArea>
</Card>
</Grid>
))}
</Grid>
)}
</Paper>
);
// Render function for the job description step
const renderJobDescription = () => (
<Paper elevation={3} sx={{ p: 3, mt: 3, mb: 4, borderRadius: 2 }}>
<Typography variant="h5" gutterBottom>
Enter Job Details
</Typography>
<Grid container spacing={3}>
<Grid size={{ xs: 12, md: 6 }}>
<TextField
fullWidth
label="Job Title"
variant="outlined"
value={jobTitle}
onChange={(e) => setJobTitle(e.target.value)}
required
margin="normal"
/>
</Grid>
<Grid size={{ xs: 12, md: 6 }}>
<TextField
fullWidth
label="Job Location"
variant="outlined"
value={jobLocation}
onChange={(e) => setJobLocation(e.target.value)}
margin="normal"
/>
</Grid>
<Grid size={{ xs: 12 }}>
<Box sx={{ display: 'flex', alignItems: 'center', mt: 2, mb: 1 }}>
<Typography variant="subtitle1" sx={{ mr: 2 }}>
Job Description
</Typography>
<Button
variant="outlined"
startIcon={<FileUploadIcon />}
size="small"
onClick={() => setOpenUploadDialog(true)}
>
Upload
</Button>
</Box>
<TextField
fullWidth
multiline
rows={12}
placeholder="Enter the job description here..."
variant="outlined"
value={jobDescription}
onChange={(e) => setJobDescription(e.target.value)}
required
InputProps={{
startAdornment: (
<InputAdornment position="start" sx={{ alignSelf: 'flex-start', mt: 1.5 }}>
<DescriptionIcon color="action" />
</InputAdornment>
),
}}
/>
<Typography variant="caption" color="text.secondary" sx={{ mt: 1, display: 'block' }}>
The job description will be used to extract requirements for candidate matching.
</Typography>
</Grid>
</Grid>
</Paper>
);
// Render function for the analysis step
const renderAnalysis = () => (
<Box sx={{ mt: 3 }}>
{selectedCandidate && (
<JobMatchAnalysis
jobTitle={jobTitle}
candidateName={selectedCandidate.name}
fetchRequirements={fetchRequirements}
fetchMatchForRequirement={fetchMatchForRequirement}
/>
)}
</Box>
);
// If user is loading, show loading state
if (userLoading) {
return (
<Box sx={{ display: 'flex', justifyContent: 'center', alignItems: 'center', minHeight: '80vh' }}>
<CircularProgress />
</Box>
);
}
// If no user is logged in, show message
if (!user) {
return (
<Container maxWidth="md">
<Paper elevation={3} sx={{ p: 4, mt: 4, textAlign: 'center' }}>
<Typography variant="h5" gutterBottom>
Please log in to access candidate analysis
</Typography>
<Button variant="contained" color="primary" sx={{ mt: 2 }}>
Log In
</Button>
</Paper>
</Container>
);
}
return (
<Container maxWidth="lg">
<Paper elevation={1} sx={{ p: 3, mt: 3, borderRadius: 2 }}>
<Typography variant="h4" component="h1" gutterBottom>
Candidate Analysis
</Typography>
<Typography variant="subtitle1" color="text.secondary" gutterBottom>
Match candidates to job requirements with AI-powered analysis
</Typography>
</Paper>
<Box sx={{ mt: 4, mb: 4 }}>
<Stepper activeStep={activeStep} alternativeLabel>
{steps.map((step, index) => (
<Step key={index}>
<StepLabel StepIconComponent={() => (
<Avatar
sx={{
bgcolor: activeStep >= index ? theme.palette.primary.main : theme.palette.grey[300],
color: 'white'
}}
>
{step.icon}
</Avatar>
)}>
{step.label}
</StepLabel>
</Step>
))}
</Stepper>
</Box>
{activeStep === 0 && renderCandidateSelection()}
{activeStep === 1 && renderJobDescription()}
{activeStep === 2 && renderAnalysis()}
<Box sx={{ display: 'flex', flexDirection: 'row', pt: 2 }}>
<Button
color="inherit"
disabled={activeStep === 0}
onClick={handleBack}
sx={{ mr: 1 }}
>
Back
</Button>
<Box sx={{ flex: '1 1 auto' }} />
{activeStep === steps.length - 1 ? (
<Button onClick={handleReset} variant="outlined">
Start New Analysis
</Button>
) : (
<Button onClick={handleNext} variant="contained">
{activeStep === steps.length - 2 ? 'Start Analysis' : 'Next'}
</Button>
)}
</Box>
{/* Error Snackbar */}
<Snackbar
open={!!error}
autoHideDuration={6000}
onClose={() => setError(null)}
anchorOrigin={{ vertical: 'bottom', horizontal: 'center' }}
>
<Alert onClose={() => setError(null)} severity="error" sx={{ width: '100%' }}>
{error}
</Alert>
</Snackbar>
{/* Upload Dialog */}
<Dialog open={openUploadDialog} onClose={() => setOpenUploadDialog(false)}>
<DialogTitle>Upload Job Description</DialogTitle>
<DialogContent>
<DialogContentText>
Upload a job description document (.pdf, .docx, .txt, or .md)
</DialogContentText>
<Box sx={{ mt: 2, textAlign: 'center' }}>
<Button
variant="outlined"
component="label"
startIcon={<FileUploadIcon />}
sx={{ mt: 1 }}
>
Choose File
<input
type="file"
hidden
accept=".pdf,.docx,.txt,.md"
onChange={() => {
// This would handle file upload in a real application
setOpenUploadDialog(false);
// Mock setting job description from file
setJobDescription(
"Senior Frontend Developer\n\nRequired Skills:\n- 5+ years of React development experience\n- Strong TypeScript skills\n- Experience with RESTful APIs\n- Knowledge of state management solutions (Redux, Context API)\n- Experience with CI/CD pipelines\n- Cloud platform experience (AWS, Azure, GCP)\n\nResponsibilities:\n- Develop and maintain frontend applications using React and TypeScript\n- Collaborate with backend developers to integrate APIs\n- Optimize applications for maximum speed and scalability\n- Design and implement new features and functionality\n- Ensure the technical feasibility of UI/UX designs"
);
setJobTitle("Senior Frontend Developer");
setJobLocation("Remote");
}}
/>
</Button>
</Box>
</DialogContent>
<DialogActions>
<Button onClick={() => setOpenUploadDialog(false)}>Cancel</Button>
</DialogActions>
</Dialog>
</Container>
);
};
export { JobAnalysisPage };

View File

@@ -48,7 +48,7 @@ try_import("prometheus_fastapi_instrumentator")
import ollama
from contextlib import asynccontextmanager
from fastapi import FastAPI, Request # type: ignore
from fastapi import FastAPI, Request, HTTPException # type: ignore
from fastapi.responses import JSONResponse, StreamingResponse, FileResponse, RedirectResponse # type: ignore
from fastapi.middleware.cors import CORSMiddleware # type: ignore
import uvicorn # type: ignore
@@ -291,7 +291,7 @@ class WebServer:
async def get_umap(doc_id: str, context_id: str, request: Request):
logger.info(f"{request.method} {request.url.path}")
try:
context = self.upsert_context(context_id)
context = await self.upsert_context(context_id)
if not context:
return JSONResponse(
{"error": f"Invalid context: {context_id}"}, status_code=400
@@ -322,7 +322,7 @@ class WebServer:
async def put_umap(context_id: str, request: Request):
logger.info(f"{request.method} {request.url.path}")
try:
context = self.upsert_context(context_id)
context = await self.upsert_context(context_id)
if not context:
return JSONResponse(
{"error": f"Invalid context: {context_id}"}, status_code=400
@@ -364,7 +364,7 @@ class WebServer:
@self.app.put("/api/similarity/{context_id}")
async def put_similarity(context_id: str, request: Request):
logger.info(f"{request.method} {request.url.path}")
context = self.upsert_context(context_id)
context = await self.upsert_context(context_id)
user = context.user
try:
data = await request.json()
@@ -427,7 +427,7 @@ class WebServer:
if not is_valid_uuid(context_id):
logger.warning(f"Invalid context_id: {context_id}")
return JSONResponse({"error": "Invalid context_id"}, status_code=400)
context = self.upsert_context(context_id)
context = await self.upsert_context(context_id)
agent = context.get_agent(agent_type)
if not agent:
response = { "history": [] }
@@ -484,7 +484,7 @@ class WebServer:
{"error": "Usage: { reset: rags|tools|history|system_prompt}"}
)
else:
self.save_context(context_id)
await self.save_context(context_id)
return JSONResponse(response)
except Exception as e:
@@ -498,7 +498,7 @@ class WebServer:
async def put_tunables(context_id: str, request: Request):
logger.info(f"{request.method} {request.url.path}")
try:
context = self.upsert_context(context_id)
context = await self.upsert_context(context_id)
data = await request.json()
agent = context.get_agent("chat")
@@ -525,7 +525,7 @@ class WebServer:
for context_tool in context.tools:
if context_tool.tool.function.name == tool["name"]:
context_tool.enabled = tool.get("enabled", True)
self.save_context(context_id)
await self.save_context(context_id)
return JSONResponse({
"tools": [{
**t.function.model_dump(mode='json'),
@@ -548,7 +548,7 @@ class WebServer:
for context_rag in context.rags:
if context_rag.name == config["name"]:
context_rag.enabled = config["enabled"]
self.save_context(context_id)
await self.save_context(context_id)
return JSONResponse({"rags": [ r.model_dump(mode="json") for r in context.rags]})
case "system_prompt":
@@ -561,7 +561,7 @@ class WebServer:
}
)
agent.system_prompt = system_prompt
self.save_context(context_id)
await self.save_context(context_id)
return JSONResponse({"system_prompt": system_prompt})
case _:
return JSONResponse(
@@ -574,7 +574,8 @@ class WebServer:
@self.app.get("/api/user/{context_id}")
async def get_user(context_id: str, request: Request):
logger.info(f"{request.method} {request.url.path}")
user = self.upsert_context(context_id).user
context = await self.upsert_context(context_id)
user = context.user
user_data = {
"username": user.username,
"first_name": user.first_name,
@@ -583,7 +584,7 @@ class WebServer:
"description": user.description,
"contact_info": user.contact_info,
"rag_content_size": user.rag_content_size,
"profile_url": user.profile_url,
"has_profile": user.has_profile,
"questions": [ q.model_dump(mode='json') for q in user.user_questions],
}
return JSONResponse(user_data)
@@ -591,7 +592,7 @@ class WebServer:
@self.app.get("/api/tunables/{context_id}")
async def get_tunables(context_id: str, request: Request):
logger.info(f"{request.method} {request.url.path}")
context = self.upsert_context(context_id)
context = await self.upsert_context(context_id)
agent = context.get_agent("chat")
if not agent:
logger.info("chat agent does not exist on this context!")
@@ -622,7 +623,7 @@ class WebServer:
logger.info(f"{request.method} {request.url.path}")
try:
context = self.upsert_context(context_id)
context = await self.upsert_context(context_id)
except Exception as e:
error = {
"error": f"Unable to create or access context {context_id}: {e}"
@@ -644,6 +645,7 @@ class WebServer:
error = {
"error": f"Attempt to create agent type: {agent_type} failed: {e}"
}
logger.error(traceback.format_exc())
logger.info(error)
return JSONResponse(error, status_code=404)
@@ -683,19 +685,23 @@ class WebServer:
except Exception as e:
result = {"status": "error", "response": str(e)}
yield json.dumps(result) + "\n"
return
break
# Convert to JSON and add newline
result = json.dumps(result) + "\n"
message.network_packets += 1
message.network_bytes += len(result)
disconnected = await request.is_disconnected()
try:
disconnected = await request.is_disconnected()
except Exception as e:
logger.warning(f"Disconnection check failed: {e}")
disconnected = True
if disconnected:
logger.info("Disconnect detected. Continuing generation to store in cache.")
disconnected = True
if not disconnected:
yield result
yield json.dumps(result) + "\n"
current_time = time.perf_counter()
if current_time - start_time > LLM_TIMEOUT:
@@ -704,13 +710,12 @@ class WebServer:
message.partial_response = message.response
logger.info(message.response + " Ending session")
result = message.model_dump(by_alias=True, mode="json")
result = json.dumps(result) + "\n"
if not disconnected:
yield result
yield json.dumps(result) + "\n"
if message.status == "error":
context.processing = False
return
break
# Allow the event loop to process the write
await asyncio.sleep(0)
@@ -721,7 +726,8 @@ class WebServer:
yield json.dumps({"status": "error", "response": str(e)}) + "\n"
finally:
# Save context on completion or error
self.save_context(context_id)
await self.save_context(context_id)
logger.info("Flush generator completed normally.")
# Return StreamingResponse with appropriate headers
return StreamingResponse(
@@ -741,13 +747,13 @@ class WebServer:
@self.app.post("/api/create-session")
async def create_session(request: Request):
logger.info(f"{request.method} {request.url.path}")
context = self.create_context(username=defines.default_username)
context = await self.create_context(username=defines.default_username)
return JSONResponse({"id": context.id})
@self.app.get("/api/join-session/{context_id}")
async def join_session(context_id: str, request: Request):
logger.info(f"{request.method} {request.url.path}")
context = self.load_context(context_id=context_id)
context = await self.load_context(context_id=context_id)
if not context:
return JSONResponse({"error": f"{context_id} does not exist."}, 404)
return JSONResponse({"id": context.id})
@@ -756,7 +762,7 @@ class WebServer:
async def get_users(context_id: str, request: Request):
logger.info(f"{request.method} {request.url.path}")
try:
context = self.load_context(context_id)
context = await self.load_context(context_id)
if not context:
return JSONResponse({"error": f"Context {context_id} not found."}, status_code=404)
@@ -768,13 +774,29 @@ class WebServer:
logger.error(f"get_users error: {str(e)}")
return JSONResponse({ "error": "Unable to parse users"}, 500)
@self.app.get("/api/u/{username}/profile/{context_id}")
async def get_user_profile(username: str, context_id: str, request: Request):
logger.info(f"{request.method} {request.url.path}")
try:
if not User.exists(username):
return JSONResponse({"error": f"User {username} not found."}, status_code=404)
context = await self.load_context(context_id)
if not context:
return JSONResponse({"error": f"Context {context_id} not found."}, status_code=404)
profile_path = os.path.join(defines.user_dir, username, "profile.png")
if not os.path.exists(profile_path):
return JSONResponse({ "error": f"User {username} does not have a profile picture"}, status_code=404)
return FileResponse(profile_path)
except Exception as e:
return JSONResponse({ "error": "Unable to load user {username}"}, 500)
@self.app.post("/api/u/{username}/{context_id}")
async def post_user(username: str, context_id: str, request: Request):
logger.info(f"{request.method} {request.url.path}")
try:
if not User.exists(username):
return JSONResponse({"error": f"User {username} not found."}, status_code=404)
context = self.load_context(context_id)
context = await self.load_context(context_id)
if not context:
return JSONResponse({"error": f"Context {context_id} not found."}, status_code=404)
matching_user = next((user for user in self.users if user.username == username), None)
@@ -782,7 +804,7 @@ class WebServer:
user = matching_user
else:
user = User(username=username, llm=self.llm)
user.initialize(prometheus_collector=self.prometheus_collector)
await user.initialize(prometheus_collector=self.prometheus_collector)
self.users.append(user)
reset_map = (
"chat",
@@ -807,10 +829,10 @@ class WebServer:
"description": user.description,
"contact_info": user.contact_info,
"rag_content_size": user.rag_content_size,
"profile_url": user.profile_url,
"has_profile": user.has_profile,
"questions": [ q.model_dump(mode='json') for q in user.user_questions],
}
self.save_context(context_id)
await self.save_context(context_id)
return JSONResponse(user_data)
except Exception as e:
return JSONResponse({ "error": "Unable to load user {username}"}, 500)
@@ -822,7 +844,7 @@ class WebServer:
if not User.exists(username):
return JSONResponse({"error": f"User {username} not found."}, status_code=404)
context = self.create_context(username=username)
context = await self.create_context(username=username)
logger.info(f"Generated new context {context.id} for {username}")
return JSONResponse({"id": context.id})
except Exception as e:
@@ -839,7 +861,7 @@ class WebServer:
async def get_history(context_id: str, agent_type: str, request: Request):
logger.info(f"{request.method} {request.url.path}")
try:
context = self.upsert_context(context_id)
context = await self.upsert_context(context_id)
agent = context.get_agent(agent_type)
if not agent:
logger.info(
@@ -858,7 +880,7 @@ class WebServer:
@self.app.get("/api/tools/{context_id}")
async def get_tools(context_id: str, request: Request):
logger.info(f"{request.method} {request.url.path}")
context = self.upsert_context(context_id)
context = await self.upsert_context(context_id)
return JSONResponse(context.tools)
@self.app.put("/api/tools/{context_id}")
@@ -867,7 +889,7 @@ class WebServer:
if not is_valid_uuid(context_id):
logger.warning(f"Invalid context_id: {context_id}")
return JSONResponse({"error": "Invalid context_id"}, status_code=400)
context = self.upsert_context(context_id)
context = await self.upsert_context(context_id)
try:
data = await request.json()
modify = data["tool"]
@@ -875,7 +897,7 @@ class WebServer:
for tool in context.tools:
if modify == tool.function.name:
tool.enabled = enabled
self.save_context(context_id)
await self.save_context(context_id)
return JSONResponse(context.tools)
return JSONResponse(
{"status": f"{modify} not found in tools."}, status_code=404
@@ -889,7 +911,7 @@ class WebServer:
if not is_valid_uuid(context_id):
logger.warning(f"Invalid context_id: {context_id}")
return JSONResponse({"error": "Invalid context_id"}, status_code=400)
context = self.upsert_context(context_id)
context = await self.upsert_context(context_id)
agent = context.get_agent(agent_type)
if not agent:
return JSONResponse(
@@ -909,14 +931,27 @@ class WebServer:
@self.app.get("/{path:path}")
async def serve_static(path: str, request: Request):
full_path = os.path.join(defines.static_content, path)
# Check if the original path exists
if os.path.exists(full_path) and os.path.isfile(full_path):
logger.info(f"Serve static request for {full_path}")
return FileResponse(full_path)
logger.info(f"Serve index.html for {path}")
return FileResponse(os.path.join(defines.static_content, "index.html"))
# Check if the path matches /{filename}.png
# if path.startswith("/") and path.endswith(".png"):
# filename = path[1:-4] # Remove leading '/' and trailing '.png'
# alt_path = f"/opt/backstory/users/{filename}/{filename}.png"
# # Check if the alternative path exists
# if os.path.exists(alt_path) and os.path.isfile(alt_path):
# logger.info(f"Serve static request for alternative path {alt_path}")
# return FileResponse(alt_path)
# If neither path exists, return 404
logger.info(f"File not found for path {full_path}")
raise HTTPException(status_code=404, detail="File not found")
def save_context(self, context_id):
async def save_context(self, context_id):
"""
Serialize a Python dictionary to a file in the agents directory.
@@ -927,7 +962,7 @@ class WebServer:
Returns:
The context_id used for the file
"""
context = self.upsert_context(context_id)
context = await self.upsert_context(context_id)
# Create agents directory if it doesn't exist
if not os.path.exists(defines.context_dir):
@@ -969,7 +1004,7 @@ class WebServer:
return context_id
def load_context(self, context_id: str) -> Context | None:
async def load_context(self, context_id: str) -> Context | None:
"""
Load a context from a file in the context directory or create a new one if it doesn't exist.
Args:
@@ -1005,7 +1040,7 @@ class WebServer:
user = matching_user
else:
user = User(username=username, llm=self.llm)
user.initialize(prometheus_collector=self.prometheus_collector)
await user.initialize(prometheus_collector=self.prometheus_collector)
self.users.append(user)
context.user = user
@@ -1036,7 +1071,7 @@ class WebServer:
return self.contexts[context_id]
def load_or_create_context(self, context_id: str) -> Context:
async def load_or_create_context(self, context_id: str) -> Context:
"""
Load a context from a file in the context directory or create a new one if it doesn't exist.
Args:
@@ -1044,14 +1079,14 @@ class WebServer:
Returns:
A Context object with the specified ID and default settings.
"""
context = self.load_context(context_id)
context = await self.load_context(context_id)
if context:
return context
logger.info(f"Context not found. Creating new instance of context {context_id}.")
self.contexts[context_id] = self.create_context(username=defines.default_username, context_id=context_id)
self.contexts[context_id] = await self.create_context(username=defines.default_username, context_id=context_id)
return self.contexts[context_id]
def create_context(self, username: str, context_id=None) -> Context:
async def create_context(self, username: str, context_id=None) -> Context:
"""
Create a new context with a unique ID and default settings.
Args:
@@ -1069,7 +1104,7 @@ class WebServer:
logger.info(f"Found matching user: {user.username}")
else:
user = User(username=username, llm=self.llm)
user.initialize(prometheus_collector=self.prometheus_collector)
await user.initialize(prometheus_collector=self.prometheus_collector)
logger.info(f"Created new instance of user: {user.username}")
self.users.append(user)
@@ -1107,10 +1142,10 @@ class WebServer:
logger.info(f"{context.id} created and added to contexts.")
self.contexts[context.id] = context
self.save_context(context.id)
await self.save_context(context.id)
return context
def upsert_context(self, context_id=None) -> Context:
async def upsert_context(self, context_id=None) -> Context:
"""
Upsert a context based on the provided context_id.
Args:
@@ -1121,13 +1156,13 @@ class WebServer:
if not context_id:
logger.warning("No context ID provided. Creating a new context.")
return self.create_context(username=defines.default_username)
return await self.create_context(username=defines.default_username)
if context_id in self.contexts:
return self.contexts[context_id]
logger.info(f"Context {context_id} is not yet loaded.")
return self.load_or_create_context(context_id=context_id)
return await self.load_or_create_context(context_id=context_id)
@REQUEST_TIME.time()
async def generate_response(

View File

@@ -17,6 +17,7 @@ from .setup_logging import setup_logging
from .agents import class_registry, AnyAgent, Agent, __all__ as agents_all
from .metrics import Metrics
from .check_serializable import check_serializable
from .profile_image import generate_image_status, ImageRequest
__all__ = [
"Agent",
@@ -33,6 +34,7 @@ __all__ = [
"check_serializable",
"logger",
"User",
"generate_image_status", "ImageRequest"
]
__all__.extend(agents_all) # type: ignore

src/utils/agents/persona.py Normal file
View File

@@ -0,0 +1,445 @@
from __future__ import annotations
from pydantic import model_validator, Field, BaseModel # type: ignore
from typing import (
Dict,
Literal,
ClassVar,
cast,
Any,
AsyncGenerator,
List,
Optional
# override
) # NOTE: You must import Optional for late binding to work
import inspect
import random
import re
import json
import traceback
import asyncio
import time
import os
from .base import Agent, agent_registry, LLMMessage
from ..message import Message
from ..rag import ChromaDBGetResponse
from ..setup_logging import setup_logging
from ..profile_image import generate_image_status, ImageRequest
from .. import defines
from ..user import User
logger = setup_logging()
seed = int(time.time())
random.seed(seed)
emptyUser = {
"profile_url": "",
"description": "",
"rag_content_size": 0,
"username": "",
"first_name": "",
"last_name": "",
"full_name": "",
"contact_info": {},
"questions": [],
}
generate_persona_system_prompt = """\
You are a casting director for a movie. Your job is to provide information on fictitious personas for use in a screenplay.
All response fields MUST BE IN ENGLISH, regardless of ethnicity.
You will be provided with defaults to use if not specified by the user:
```json
{
"age": number,
"gender": "male" | "female",
"ethnicity": string,
}
```
Additional information provided in the user message can override those defaults.
You need to randomly assign an English username (which can include numbers), a first name, a last name, and a two-sentence English description of that individual's work, given the demographics provided.
Your response must be in JSON.
Provide only the JSON response, and match the field names EXACTLY.
Provide all information in English ONLY, with no other commentary:
```json
{
"username": string, # A likely-to-be unique username, no more than 15 characters (can include numbers and letters but no special characters)
"first_name": string,
"last_name": string,
"description": string, # One to two sentence description of their job
"location": string, # In the location, provide ALL of: City, State/Region, and Country
}
```
Make sure to provide a username and that the field name for the job description is "description".
"""
generate_resume_system_prompt = """
You are a creative writing casting director. As part of the casting, you are building backstories about individuals. The first part
of that is to create an in-depth resume for the person. You will be provided with the following information:
```json
"full_name": string, # Person full name
"location": string, # Location of residence
"age": number, # Age of candidate
"description": string # A brief description of the person
```
Use that information to invent a full career resume. Include sections such as:
* Contact information
* Job goal
* Top skills
* Detailed work history. If they are under the age of 25, you might include skills, hobbies, or volunteering they may have done as an adolescent
* In the work history, provide company names, years of employment, and their role
* Education
Provide the resume in Markdown format. DO NOT provide any commentary before or after the resume.
"""
class PersonaGenerator(Agent):
agent_type: Literal["persona"] = "persona" # type: ignore
_agent_type: ClassVar[str] = agent_type # Add this for registration
system_prompt: str = generate_persona_system_prompt
age: int = Field(default_factory=lambda: random.randint(22, 67))
gender: str = Field(default_factory=lambda: random.choice(["male", "female"]))
ethnicity: Literal[
"Asian", "African", "Caucasian", "Hispanic/Latino", "Mixed/Multiracial"
] = Field(
default_factory=lambda: random.choices(
["Asian", "African", "Caucasian", "Hispanic/Latino", "Mixed/Multiracial"],
weights=[57.69, 15.38, 19.23, 5.77, 1.92],
k=1
)[0]
)
username: str = ""
llm: Any = Field(default=None, exclude=True)
model: Optional[str] = Field(default=None, exclude=True)
def randomize(self):
self.age = random.randint(22, 67)
self.gender = random.choice(["male", "female"])
# Use random.choices with explicit type casting to satisfy Literal type
self.ethnicity = cast(
Literal["Asian", "African", "Caucasian", "Hispanic/Latino", "Mixed/Multiracial"],
random.choices(
["Asian", "African", "Caucasian", "Hispanic/Latino", "Mixed/Multiracial"],
weights=[57.69, 15.38, 19.23, 5.77, 1.92],
k=1
)[0]
)
async def prepare_message(self, message: Message) -> AsyncGenerator[Message, None]:
logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
if not self.context:
raise ValueError("Context is not set for this agent.")
message.tunables.enable_tools = False
message.tunables.enable_rag = False
message.tunables.enable_context = False
message.prompt = f"""\
```json
{json.dumps({
"age": self.age,
"gender": self.gender,
"ethnicity": self.ethnicity
})}
```
{message.prompt}
"""
message.status = "done"
yield message
return
async def process_message(
self, llm: Any, model: str, message: Message
) -> AsyncGenerator[Message, None]:
logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
if not self.context:
raise ValueError("Context is not set for this agent.")
self.llm = llm
self.model = model
original_prompt = message.prompt
spinner: List[str] = ["\\", "|", "/", "-"]
tick: int = 0
while self.context.processing:
logger.info(
"TODO: Implement delay queing; busy for same agent, otherwise return queue size and estimated wait time"
)
message.status = "waiting"
message.response = (
f"Busy processing another request. Please wait. {spinner[tick]}"
)
tick = (tick + 1) % len(spinner)
yield message
await asyncio.sleep(1) # Allow the event loop to process the write
self.context.processing = True
try:
#
# Generate the persona
#
async for message in self.call_llm(
message=message, system_prompt=self.system_prompt, prompt=original_prompt
):
if message.status != "done":
yield message
if message.status == "error":
raise Exception(message.response)
json_str = self.extract_json_from_text(message.response)
try:
persona = json.loads(json_str) | {
"age": self.age,
"gender": self.gender,
"ethnicity": self.ethnicity
}
if not persona.get("full_name", None):
persona["full_name"] = f"{persona['first_name']} {persona['last_name']}"
self.username = persona.get("username", None)
if not self.username:
raise ValueError("LLM did not generate a username")
user_dir = os.path.join(defines.user_dir, persona["username"])
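# The LLM may repeat usernames across runs; if the directory already
# exists, append or increment a numeric suffix until an unused name is
# found (e.g. "mchen" -> "mchen1" -> "mchen2").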
while os.path.exists(user_dir):
match = re.match(r"^(.*?)(\d*)$", persona["username"])
if match:
base = match.group(1)
num = match.group(2)
iteration = int(num) + 1 if num else 1
persona["username"] = f"{base}{iteration}"
user_dir = os.path.join(defines.user_dir, persona["username"])
for key in persona:
if isinstance(persona[key], str):
persona[key] = persona[key].strip()
# Mark this persona as AI generated
persona["is_ai"] = True
except Exception as e:
message.response = f"Unable to parse LLM returned content: {json_str} {str(e)}"
message.status = "error"
logger.error(traceback.format_exc())
logger.error(message.response)
yield message
return
# Persona generated
message.response = json.dumps(persona)
message.status = "partial"
yield message
#
# Generate the resume
#
message.status = "thinking"
message.response = f"Generating resume for {persona['full_name']}..."
yield message
prompt = f"""
```json
{{
"full_name": "{persona["full_name"]}",
"location": "{persona["location"]}",
"age": {persona["age"]},
"description": {persona["description"]}
}}
```
"""
try:
async for message in self.call_llm(
message=message, system_prompt=generate_resume_system_prompt, prompt=prompt
):
if message.status != "done":
yield message
if message.status == "error":
raise Exception(message.response)
except Exception as e:
message.response = f"Unable to parse LLM returned content: {json_str} {str(e)}"
message.status = "error"
logger.error(traceback.format_exc())
logger.error(message.response)
yield message
return
resume = self.extract_markdown_from_text(message.response)
if resume:
user_resume_dir = os.path.join(defines.user_dir, persona["username"], defines.resume_doc_dir)
os.makedirs(user_resume_dir, exist_ok=True)
user_resume_file = os.path.join(user_resume_dir, defines.resume_doc)
with open(user_resume_file, "w") as f:
f.write(resume)
# Resume generated
message.response = resume
message.status = "partial"
yield message
#
# Generate RAG database
#
message.status = "thinking"
message.response = f"Generating RAG content from resume..."
yield message
# Prior to instancing a new User, the json data has to be created
# so the system can process it
user_dir = os.path.join(defines.user_dir, persona["username"])
os.makedirs(user_dir, exist_ok=True)
user_info = os.path.join(user_dir, "info.json")
with open(user_info, "w") as f:
f.write(json.dumps(persona, indent=2))
user = User(llm=self.llm, username=self.username)
await user.initialize()
await user.file_watcher.initialize_collection()
# RAG content generated
message.status = "partial"
message.response = f"{user.file_watcher.collection.count()} entries created in RAG vector store."
yield message
#
# Generate the profile picture
#
prompt = f"A photorealistic profile picture of a {persona["age"]} year old {persona["gender"]} {persona["ethnicity"]} person."
if original_prompt:
prompt = f"{prompt} {original_prompt}"
message.status = "thinking"
message.response = prompt
yield message
request = ImageRequest(filepath=os.path.join(defines.user_dir, persona["username"], "profile.png"), prompt=prompt)
placeholder = Message(prompt=prompt)
async for placeholder in generate_image_status(
message=placeholder,
**request.model_dump()
):
if placeholder.status != "done":
message.response = placeholder.response
yield message
persona["has_profile"] = True
#
# Write out the completed user information
#
with open(user_info, "w") as f:
f.write(json.dumps(persona, indent=2))
# Image generated
message.status = "done"
message.response = json.dumps(persona)
except Exception as e:
message.status = "error"
message.response = f"Error in persona generation: {str(e)}"
logger.error(traceback.format_exc())
logger.error(message.response)
self.randomize() # Randomize for next generation
yield message
return
# Done processing, add message to conversation
self.context.processing = False
self.randomize() # Randomize for next generation
# Return the final message
yield message
return
async def call_llm(self, message: Message, system_prompt, prompt, temperature=0.7):
logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
messages: List[LLMMessage] = [
LLMMessage(role="system", content=system_prompt),
LLMMessage(role="user", content=prompt),
]
message.metadata.options = {
"seed": 8911,
"num_ctx": self.context_size,
"temperature": temperature, # Higher temperature to encourage tool usage
}
message.status = "streaming"
yield message
last_chunk_time = 0
message.chunk = ""
message.response = ""
for response in self.llm.chat(
model=self.model,
messages=messages,
options={
**message.metadata.options,
},
stream=True,
):
if not response:
message.status = "error"
message.response = "No response from LLM."
yield message
return
message.status = "streaming"
message.chunk += response.message.content
message.response += response.message.content
if not response.done:
now = time.perf_counter()
if now - last_chunk_time > 0.25:
yield message
last_chunk_time = now
message.chunk = ""
if response.done:
self.collect_metrics(response)
message.metadata.eval_count += response.eval_count
message.metadata.eval_duration += response.eval_duration
message.metadata.prompt_eval_count += response.prompt_eval_count
message.metadata.prompt_eval_duration += response.prompt_eval_duration
self.context_tokens = response.prompt_eval_count + response.eval_count
message.chunk = ""
message.status = "done"
yield message
def extract_json_from_text(self, text: str) -> str:
"""Extract JSON string from text that may contain other content."""
json_pattern = r"```json\s*([\s\S]*?)\s*```"
match = re.search(json_pattern, text)
if match:
return match.group(1).strip()
# Try to find JSON without the markdown code block
json_pattern = r"({[\s\S]*})"
match = re.search(json_pattern, text)
if match:
return match.group(1).strip()
raise ValueError("No JSON found in the response")
def extract_markdown_from_text(self, text: str) -> str:
"""Extract Markdown string from text that may contain other content."""
markdown_pattern = r"```(md|markdown)\s*([\s\S]*?)\s*```"
match = re.search(markdown_pattern, text)
if match:
return match.group(2).strip()
raise ValueError("No Markdown found in the response")
# Register the persona generator agent
agent_registry.register(PersonaGenerator._agent_type, PersonaGenerator)

View File

@@ -7,7 +7,8 @@ user_info_file = "info.json" # Relative to "{user_dir}/{use
default_username = "jketreno"
rag_content_dir = "rag-content" # Relative to "{user_dir}/{user}"
# Path to candidate full resume
resume_doc = "rag-content/resume/resume.md" # Relative to "{user_dir}/{user}/" (does not have to be in docs)
resume_doc_dir = f"{rag_content_dir}/resume" # Relative to "{user_dir}/{user}"
resume_doc = "resume.md"
persist_directory = "db" # Relative to "{user_dir}/{user}"
# Model name License Notes

View File

@@ -0,0 +1,101 @@
import asyncio
import gc
import re
import time
from typing import Any
import torch # type: ignore
from diffusers import StableDiffusionPipeline, FluxPipeline # type: ignore
class ImageModelCache:
def __init__(self, timeout_seconds: float = 60 * 15):
self._pipe = None
self._model_name = None
self._device = None
self._last_access_time = 0
self._timeout_seconds = timeout_seconds
self._lock = asyncio.Lock()
self._cleanup_task = None
async def start(self):
if self._cleanup_task is None:
self._cleanup_task = asyncio.create_task(self._periodic_cleanup())
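# Model routing is by substring: any name containing "stable-diffusion"
# selects StableDiffusionPipeline; everything else is assumed to be a
# FLUX-style checkpoint.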
def _get_model_type(self, model_name: str) -> str:
if re.search(r"stable-diffusion", model_name, re.IGNORECASE):
return "stable"
return "flux"
async def get_pipeline(self, model: str, device: str) -> Any:
await self.start() # Ensure cleanup task starts on first use
async with self._lock:
current_time = time.time()
current_model_type = self._get_model_type(model)
cached_model_type = self._get_model_type(self._model_name) if self._model_name else None
if (
self._pipe is not None and
self._model_name == model and
self._device == device and
current_model_type == cached_model_type and
current_time - self._last_access_time < self._timeout_seconds
):
self._last_access_time = current_time
return self._pipe
await self._unload_model()
if current_model_type == "stable":
pipe = StableDiffusionPipeline.from_pretrained(
model,
torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
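# Replace the default safety checker with a pass-through stub; the stock
# checker blanks out any image it flags, which would silently discard
# generated portraits.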
def dummy_safety_checker(images, clip_input):
return images, [False] * len(images)
pipe.safety_checker = dummy_safety_checker
else:
pipe = FluxPipeline.from_pretrained(
model,
torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
try:
pipe.load_lora_weights('enhanceaiteam/Flux-uncensored', weight_name='lora.safetensors')
except Exception as e:
raise Exception(f"Failed to load LoRA weights: {str(e)}")
pipe = pipe.to(device)
self._pipe = pipe
self._model_name = model
self._device = device
self._last_access_time = current_time
return pipe
async def _unload_model(self):
if self._pipe is not None:
try:
del self._pipe
gc.collect()
if self._device == "cuda":
torch.cuda.empty_cache()
elif self._device == "xpu":
torch.xpu.empty_cache()
except Exception:
pass
self._pipe = None
self._model_name = None
self._device = None
async def cleanup_if_expired(self):
async with self._lock:
if (
self._pipe is not None and
time.time() - self._last_access_time >= self._timeout_seconds
):
await self._unload_model()
async def _periodic_cleanup(self):
while True:
await asyncio.sleep(self._timeout_seconds)
await self.cleanup_if_expired()

View File

@@ -10,7 +10,6 @@ class Chunk(TypedDict):
text: str
metadata: Dict[str, Any]
def clear_chunk(chunk: Chunk):
chunk["text"] = ""
chunk["metadata"] = {
@@ -22,7 +21,6 @@ def clear_chunk(chunk: Chunk):
}
return chunk
class MarkdownChunker:
def __init__(self):
# Initialize the Markdown parser
@@ -45,6 +43,7 @@ class MarkdownChunker:
# Parse the markdown
tokens = self.md_parser.parse(content)
logging.info(f"Found {len(tokens)} in {file_path}")
ast = SyntaxTreeNode(tokens)

src/utils/profile_image.py Normal file
View File

@@ -0,0 +1,185 @@
from __future__ import annotations
from pydantic import BaseModel, Field # type: ignore
from typing import Dict, Literal, Any, AsyncGenerator, Optional
import inspect
import random
import re
import json
import traceback
import asyncio
import time
import os
import gc
import tempfile
import uuid
import torch # type: ignore
from .agents.base import Agent, agent_registry, LLMMessage
from .message import Message
from .rag import ChromaDBGetResponse
from .setup_logging import setup_logging
from .image_model_cache import ImageModelCache
logger = setup_logging()
# Heuristic time estimates (in seconds) for different models and devices at 512x512
TIME_ESTIMATES = {
"stable-diffusion": {
"cuda": {"load": 5, "per_step": 0.5},
"xpu": {"load": 7, "per_step": 0.7},
"cpu": {"load": 20, "per_step": 5.0},
},
"flux": {
"cuda": {"load": 10, "per_step": 0.8},
"xpu": {"load": 15, "per_step": 1.0},
"cpu": {"load": 30, "per_step": 10.0},
}
}
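# Estimates scale linearly with pixel count relative to 512x512. For example,
# the default ImageRequest below (FLUX, forced to cpu, 4 steps at 256x256)
# would be estimated at 30 + 10.0 * 4 * (256*256)/(512*512) = 40.0 seconds.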
class ImageRequest(BaseModel):
filepath: str
prompt: str
model: str = "black-forest-labs/FLUX.1-schnell"
iterations: int = 4
height: int = 256
width: int = 256
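# PersonaGenerator builds one request per persona, e.g. (illustrative values):
#   ImageRequest(filepath="/opt/backstory/users/mchen/profile.png",
#                prompt="A photorealistic profile picture of a 41 year old ...")
# Unspecified fields fall back to the FLUX.1-schnell defaults above.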
# Global model cache instance
model_cache = ImageModelCache(timeout_seconds=60 * 15) # 15 minutes
def status(message: Message, status: str, progress: float = 0, estimated_time_remaining="...") -> Message:
message.status = "thinking"
message.response = json.dumps({
"status": status,
"progress": progress,
"estimated_time_remaining": estimated_time_remaining
})
return message
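# Each update is yielded as a "thinking" message whose response field carries
# a JSON payload, e.g. (illustrative):
#   {"status": "Model loaded in 14.2 seconds. Generating image with 4 inference steps",
#    "progress": 10, "estimated_time_remaining": "..."}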
async def generate_image_status(message: Message, model: str, prompt: str, iterations: int, filepath: str, height: int = 512, width: int = 512) -> AsyncGenerator[Message, None]:
"""Generate an image with specified dimensions and yield status updates with time estimates."""
try:
# Validate prompt
prompt = prompt.strip()
if not prompt:
message.status = "error"
message.response = "Prompt cannot be empty"
yield message
return
# Validate dimensions
if height <= 0 or width <= 0:
message.status = "error"
message.response = "Height and width must be positive"
yield message
return
if re.match(r".*stable-diffusion.*", model):
if height % 8 != 0 or width % 8 != 0:
message.status = "error"
message.response = "Stable Diffusion requires height and width to be multiples of 8"
yield message
return
filedir = os.path.dirname(filepath)
filename = os.path.basename(filepath)
os.makedirs(filedir, exist_ok=True)
model_type = "stable-diffusion" if re.match(r".*stable-diffusion.*", model) else "flux"
if model_type == "flux":
device = "cpu"
else:
device = "cuda" if torch.cuda.is_available() else "xpu" if torch.xpu.is_available() else "cpu"
yield status(message, f"Starting image generation for prompt: {prompt} {width}x{height} as {filename} using {device}")
# Get initial time estimate, scaled by resolution
estimates = TIME_ESTIMATES[model_type][device]
resolution_scale = (height * width) / (512 * 512)
estimated_total = estimates["load"] + estimates["per_step"] * iterations * resolution_scale
yield status(message, f"Estimated generation time: ~{estimated_total:.1f} seconds for {width}x{height}")
# Initialize or get cached pipeline
start_time = time.time()
yield status(message, f"Loading {model_type} model: {model}")
pipe = await model_cache.get_pipeline(model, device)
load_time = time.time() - start_time
yield status(message, f"Model loaded in {load_time:.1f} seconds. Generating image with {iterations} inference steps", progress=10)
# Generate image with progress tracking
start_gen_time = time.time()
if model_type == "stable-diffusion":
steps_completed = 0
last_step_time = start_gen_time
def progress_callback(step: int, timestep: int, latents: torch.Tensor):
nonlocal steps_completed, last_step_time
steps_completed += 1
current_time = time.time()
step_time = current_time - last_step_time
last_step_time = current_time
progress = (steps_completed / iterations) * 80 + 10 # Scale from 10% to 90%
remaining_steps = iterations - steps_completed
estimated_remaining = step_time * remaining_steps * resolution_scale
yield status(message=message, status=f"Step {steps_completed}/{iterations} completed", progress=progress, estimated_time_remaining=str(estimated_remaining))
async def capture_progress(step: int, timestep: int, latents: torch.Tensor):
for msg in progress_callback(step, timestep, latents):
yield msg
yield status(message, f"Generating image with {iterations} inference steps")
image = pipe(
prompt,
num_inference_steps=iterations,
guidance_scale=7.5,
height=height,
width=width,
callback=capture_progress,
callback_steps=1
).images[0]
else:
# Flux does not expose a per-step callback; generation blocks until complete
estimated_gen_time = estimates["per_step"] * iterations * resolution_scale
yield status(message, f"Starting Flux image generation with {iterations} inference steps", estimated_time_remaining=estimated_gen_time)
image = pipe(
prompt,
num_inference_steps=iterations,
guidance_scale=7.5,
height=height,
width=width
).images[0]
gen_time = time.time() - start_gen_time
per_step_time = gen_time / iterations if iterations > 0 else gen_time
yield status(message, f"Image generated in {gen_time:.1f} seconds, {per_step_time:.1f} per iteration.", 90)
# Save image
yield status(message, f"Saving image to {filepath}", 95)
image.save(filepath)
# Final result
total_time = time.time() - start_time
message.status = "done"
message.response = json.dumps({
"status": f"Image generation complete in {total_time:.1f} seconds",
"progress": 100,
"filename": filepath
})
yield message
except Exception as e:
message.status = "error"
message.response = str(e)
yield message
logger.error(traceback.format_exc())
logger.error(message.response)
return

View File

@@ -11,7 +11,7 @@ import json
import numpy as np # type: ignore
import traceback
import chromadb
import chromadb # type: ignore
import ollama
from watchdog.observers import Observer # type: ignore
from watchdog.events import FileSystemEventHandler # type: ignore
@@ -215,6 +215,7 @@ class ChromaDBFileWatcher(FileSystemEventHandler):
files_checked += 1
current_hash = self._get_file_hash(file_path)
if not current_hash:
logging.info(f"Unable to obtain hash of {file_path}")
continue
# If file is new, changed, or we're processing all files
@@ -255,6 +256,7 @@ class ChromaDBFileWatcher(FileSystemEventHandler):
for file_path in files_to_process:
async with self.update_lock:
files_processed += 1
await self._update_document_in_collection(file_path)
else:
logging.info("No files to process")
@@ -632,6 +634,7 @@ class ChromaDBFileWatcher(FileSystemEventHandler):
chunks = self._markdown_chunker.process_file(file_path)
if not chunks:
logging.info(f"No chunks found in markdown: {file_path}")
return
# Extract top-level directory

View File

@@ -1,7 +1,7 @@
from __future__ import annotations
from pydantic import BaseModel, Field, model_validator # type: ignore
from uuid import uuid4
from typing import List, Optional, Generator, ClassVar, Any, Dict, TYPE_CHECKING
from typing import List, Optional, Generator, ClassVar, Any, Dict, TYPE_CHECKING, Literal
from typing_extensions import Annotated, Union
import numpy as np # type: ignore
@@ -14,7 +14,6 @@ import json
import re
from pathlib import Path
from . rag import start_file_watcher, ChromaDBFileWatcher, ChromaDBGetResponse
from . import defines
from . import Message
@@ -34,16 +33,17 @@ class User(BaseModel):
model_config = {"arbitrary_types_allowed": True} # Allow ChromaDBFileWatcher, etc
username: str
llm: Any = Field(exclude=True)
llm: Any = Field(exclude=True) # Required in order to embed content into RAG
rags: List[RagEntry] = Field(default_factory=list)
first_name: str = ""
last_name: str = ""
full_name: str = ""
description: str = ""
profile_url: str = ""
rag_content_size : int = 0
contact_info : Dict[str, str] = {}
user_questions : List[Question] = []
has_profile: bool = False
is_ai: bool = False
#context: Optional[List[Context]] = []
# file_watcher : ChromaDBFileWatcher = set by initialize
@@ -100,7 +100,7 @@ class User(BaseModel):
@property
def prometheus_collector(self) -> CollectorRegistry:
if not self.User__prometheus_collector:
raise ValueError("initialize() has not been called.")
raise ValueError("initialize() has not been called with a prometheus_collector.")
return self.User__prometheus_collector
@property
@@ -174,12 +174,13 @@ class User(BaseModel):
@classmethod
def sanitize(cls, user: Dict[str, Any]):
sanitized : Dict[str, Any] = {}
sanitized["username"] = user.get("username")
sanitized["username"] = user.get("username", "default")
sanitized["first_name"] = user.get("first_name", sanitized["username"])
sanitized["last_name"] = user.get("last_name", "")
sanitized["full_name"] = user.get("full_name", f"{sanitized["first_name"]} {sanitized["last_name"]}")
sanitized["description"] = user.get("description", "")
sanitized["profile_url"] = user.get("profile_url", "")
profile_image = os.path.join(defines.user_dir, sanitized["username"], "profile.png")
sanitized["has_profile"] = os.path.exists(profile_image)
contact_info = user.get("contact_info", {})
sanitized["contact_info"] = {}
for key in contact_info:
@@ -228,17 +229,21 @@ class User(BaseModel):
with open(info_path, 'r') as file:
data = json.load(file)
data["username"] = item
profile_image = os.path.join(defines.user_dir, item, "profile.png")
data["has_profile"] = os.path.exists(profile_image)
user_data.append(data)
except json.JSONDecodeError:
except json.JSONDecodeError as e:
# Skip files that aren't valid JSON
logger.info(f"Invalid JSON for {info_path}: {str(e)}")
continue
except Exception as e:
# Skip files that can't be read
logger.info(f"Exception processing {info_path}: {str(e)}")
continue
return user_data
def initialize(self, prometheus_collector):
async def initialize(self, prometheus_collector=None):
if self.User__initialized:
# Initialization can only be attempted once; if there are multiple attempts, it means
# a subsystem is failing or there is a logic bug in the code.
@@ -247,12 +252,15 @@ class User(BaseModel):
# succeeded. This prevents server loops on failure
raise ValueError("initialize can only be attempted once")
self.User__initialized = True
if not self.username:
raise ValueError("username can not be empty")
user_dir = os.path.join(defines.user_dir, self.username)
user_info = os.path.join(user_dir, defines.user_info_file)
persist_directory=os.path.join(user_dir, defines.persist_directory)
watch_directory=os.path.join(user_dir, defines.rag_content_dir)
logger.info(f"User(username={self.username}, user_dir={user_dir} persist_directory={persist_directory}, watch_directory={watch_directory}")
vector_db_dir=os.path.join(user_dir, defines.persist_directory)
rag_content_dir=os.path.join(user_dir, defines.rag_content_dir)
logger.info(f"User(username={self.username}, user_dir={user_dir} persist_directory={vector_db_dir}, rag_content_dir={rag_content_dir}")
info = {}
# Always re-initialize the user's name and contact data from the info file in case it is changed
@@ -268,8 +276,10 @@ class User(BaseModel):
self.last_name = info.get("last_name", "")
self.full_name = info.get("full_name", f"{self.first_name} {self.last_name}")
self.description = info.get("description", "")
self.profile_url = info.get("profile_url", "")
self.contact_info = info.get("contact_info", {})
profile_image = os.path.join(defines.user_dir, self.username, "profile.png")
self.has_profile = os.path.exists(profile_image)
self.is_ai = info.get("is_ai", False)
questions = info.get("questions", [ f"Tell me about {self.first_name}.", f"What are {self.first_name}'s professional strengths?"])
self.user_questions = []
for question in questions:
@@ -281,15 +291,17 @@ class User(BaseModel):
except Exception as e:
logger.info(f"Unable to initialize all questions from {user_info}")
os.makedirs(persist_directory, exist_ok=True)
os.makedirs(watch_directory, exist_ok=True)
os.makedirs(vector_db_dir, exist_ok=True)
os.makedirs(rag_content_dir, exist_ok=True)
if prometheus_collector:
self.User__prometheus_collector = prometheus_collector
self.User__prometheus_collector = prometheus_collector
self.User__observer, self.User__file_watcher = start_file_watcher(
llm=self.llm,
collection_name=self.username,
persist_directory=persist_directory,
watch_directory=watch_directory,
persist_directory=vector_db_dir,
watch_directory=rag_content_dir,
recreate=False, # Don't recreate if exists
)
has_username_rag = any(item["name"] == self.username for item in self.rags)

View File

@@ -2,7 +2,6 @@
"first_name": "Eliza",
"last_name": "Morgan",
"description": "Eliza Morgan is an AI generated persona. In addition, she is a conservation botanist with over a decade of experience in leading ecological restoration projects, managing native plant programs, and advancing rare plant propagation methods across the Pacific Northwest. Her proven record of scientific innovation, effective stakeholder engagement, and successful grant writing are key to her professional strengths.",
"profile_url": "https://backstory.ketrenos.com/eliza.png",
"questions": [
"Is Eliza real?",
"What are Eliza's skills?"