Refactored Resume Builder

James Ketr 2025-04-18 15:58:16 -07:00
parent a6b38bfbbe
commit 6eaad89b1a
10 changed files with 582 additions and 356 deletions


@ -1,5 +1,7 @@
div { div {
box-sizing: border-box; box-sizing: border-box;
overflow-wrap: break-word;
word-break: break-word;
} }
.TabPanel { .TabPanel {


@ -340,7 +340,7 @@ const App = () => {
const [messageHistoryLength, setMessageHistoryLength] = useState<number>(5); const [messageHistoryLength, setMessageHistoryLength] = useState<number>(5);
const [tab, setTab] = useState<number>(0); const [tab, setTab] = useState<number>(0);
const [about, setAbout] = useState<string>(""); const [about, setAbout] = useState<string>("");
const [jobDescription, setJobDescription] = useState<string>(""); const [jobDescription, setJobDescription] = useState<string | undefined>(undefined);
const [resume, setResume] = useState<MessageData | undefined>(undefined); const [resume, setResume] = useState<MessageData | undefined>(undefined);
const [facts, setFacts] = useState<MessageData | undefined>(undefined); const [facts, setFacts] = useState<MessageData | undefined>(undefined);
const timerRef = useRef<any>(null); const timerRef = useRef<any>(null);
@ -1263,7 +1263,7 @@ const App = () => {
</CustomTabPanel> </CustomTabPanel>
<CustomTabPanel tab={tab} index={1}> <CustomTabPanel tab={tab} index={1}>
<ResumeBuilder {...{ isScrolledToBottom, scrollToBottom, facts, setFacts, resume, setResume, jobDescription, processing, setProcessing, setSnack, connectionBase: connectionBase, sessionId }} /> <ResumeBuilder {...{ isScrolledToBottom, setJobDescription, scrollToBottom, facts, setFacts, resume, setResume, jobDescription, processing, setProcessing, setSnack, connectionBase: connectionBase, sessionId }} />
</CustomTabPanel> </CustomTabPanel>
<CustomTabPanel tab={tab} index={2}> <CustomTabPanel tab={tab} index={2}>


@ -62,7 +62,7 @@ function ChatBubble({ role, isFullWidth, children, sx }: ChatBubbleProps) {
border: `1px solid ${theme.palette.secondary.main}`, // Dusty Teal border: `1px solid ${theme.palette.secondary.main}`, // Dusty Teal
borderRadius: '16px', borderRadius: '16px',
padding: theme.spacing(1, 2), padding: theme.spacing(1, 2),
maxWidth: isFullWidth ? '100%' : '95%', maxWidth: isFullWidth ? '100%' : '100%',
minWidth: '70%', minWidth: '70%',
alignSelf: 'flex-start', alignSelf: 'flex-start',
color: theme.palette.text.primary, // Charcoal Black (#2E2E2E) — much better contrast color: theme.palette.text.primary, // Charcoal Black (#2E2E2E) — much better contrast

frontend/src/Document.tsx Normal file

@ -0,0 +1,45 @@
import React from 'react';
import { Box, Typography } from '@mui/material';
import { SxProps, Theme } from '@mui/material';
/**
* Props for the Document component
* @interface DocumentComponentProps
* @property {string} title - The title of the document
 * @property {React.ReactNode} [children] - The content of the document
 * @property {SxProps<Theme>} [sx] - Optional styling properties
 */
interface DocumentComponentProps {
title: string;
children?: React.ReactNode;
sx?: SxProps<Theme>;
}
/**
* Document component renders a container with optional title and scrollable content
*
* This component provides a consistent document viewing experience across the application
* with a title header and scrollable content area
*/
const Document: React.FC<DocumentComponentProps> = ({ title, children, sx }) => (
<Box
sx={{
...sx,
display: 'flex',
flexDirection: 'column',
flexGrow: 1,
overflow: 'hidden',
}}
>
{
title !== "" &&
<Typography sx={{ pl: 1, pr: 1, display: 'flex', mt: -1, fontWeight: 'bold' }}>{title}</Typography>
}
<Box sx={{ display: 'flex', p: 1, flexGrow: 1, overflow: 'auto' }}>
{children}
</Box>
</Box>
);
export {
Document
};
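
For orientation, a minimal usage sketch of the new Document wrapper (the ExampleUsage component and its content below are illustrative placeholders, not part of this commit):

import React from 'react';
import { Box, Typography } from '@mui/material';
import { Document } from './Document';

// Illustrative only: Document provides the optional title header and a scrollable content area.
const ExampleUsage: React.FC = () => (
  <Box sx={{ display: 'flex', height: '100vh' }}>
    <Document title="Job Description" sx={{ border: '1px solid #ccc' }}>
      <Typography>Paste or render the job description here.</Typography>
    </Document>
  </Box>
);

export { ExampleUsage };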


@ -0,0 +1,27 @@
import { SxProps, Theme } from '@mui/material';
import { MessageData } from './MessageMeta';
/**
* Props for the DocumentViewer component
* @interface DocumentViewerProps
* @property {function} generateResume - Function to generate a resume based on job description
* @property {MessageData | undefined} resume - The generated resume data
* @property {function} setResume - Function to set the generated resume
* @property {function} factCheck - Function to fact check the generated resume
* @property {MessageData | undefined} facts - The fact check results
* @property {function} setFacts - Function to set the fact check results
 * @property {string | undefined} jobDescription - The initial job description
* @property {function} setJobDescription - Function to set the job description
* @property {SxProps<Theme>} [sx] - Optional styling properties
*/
export interface DocumentViewerProps {
generateResume: (jobDescription: string) => void;
resume: MessageData | undefined;
setResume: (resume: MessageData | undefined) => void;
factCheck: (resume: string) => void;
facts: MessageData | undefined;
setFacts: (facts: MessageData | undefined) => void;
jobDescription: string | undefined;
setJobDescription: (jobDescription: string | undefined) => void;
sx?: SxProps<Theme>;
}
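
As a rough sketch of how these props are expected to be wired together by a parent component (the useDocumentViewerProps hook below is hypothetical; the real wiring lives in ResumeBuilder):

import { useState } from 'react';
import { MessageData } from './MessageMeta';
import { DocumentViewerProps } from './DocumentTypes';

// Hypothetical sketch: assembles a DocumentViewerProps object from local state.
const useDocumentViewerProps = (): DocumentViewerProps => {
  const [resume, setResume] = useState<MessageData | undefined>(undefined);
  const [facts, setFacts] = useState<MessageData | undefined>(undefined);
  const [jobDescription, setJobDescription] = useState<string | undefined>(undefined);

  return {
    // Both callbacks would normally call the backend and then update state via
    // setResume / setFacts; they are stubbed out here.
    generateResume: (jd: string) => { console.log('generate resume for', jd); },
    factCheck: (resumeText: string) => { console.log('fact check', resumeText); },
    resume,
    setResume,
    facts,
    setFacts,
    jobDescription,
    setJobDescription,
  };
};

export { useDocumentViewerProps };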


@ -12,255 +12,351 @@ import {
Divider, Divider,
Slider, Slider,
Stack, Stack,
TextField TextField,
Tooltip
} from '@mui/material'; } from '@mui/material';
import Tooltip from '@mui/material/Tooltip';
import { useTheme } from '@mui/material/styles'; import { useTheme } from '@mui/material/styles';
import SendIcon from '@mui/icons-material/Send'; import SendIcon from '@mui/icons-material/Send';
import { import {
ChevronLeft, ChevronLeft,
ChevronRight, ChevronRight,
SwapHoriz, SwapHoriz,
RestartAlt as ResetIcon,
} from '@mui/icons-material'; } from '@mui/icons-material';
import { SxProps, Theme } from '@mui/material';
import PropagateLoader from "react-spinners/PropagateLoader"; import PropagateLoader from "react-spinners/PropagateLoader";
import { MessageData } from './MessageMeta';
import { Message } from './Message'; import { Message } from './Message';
import { Document } from './Document';
import { DocumentViewerProps } from './DocumentTypes';
import MuiMarkdown from 'mui-markdown';
interface DocumentComponentProps { /**
title: string; * DocumentViewer component
children?: React.ReactNode; *
} * A responsive component that displays job descriptions, generated resumes and fact checks
* with different layouts for mobile and desktop views.
interface DocumentViewerProps { */
generateResume: (jobDescription: string) => void, const DocumentViewer: React.FC<DocumentViewerProps> = ({
factCheck: (resume: string) => void, generateResume,
resume: MessageData | undefined, jobDescription,
facts: MessageData | undefined, factCheck,
jobDescription: string, resume,
sx?: SxProps<Theme>, setResume,
}; facts,
setFacts,
// Document component sx
const Document: React.FC<DocumentComponentProps> = ({ title, children }) => ( }) => {
<Box // State for editing job description
sx={{ const [editJobDescription, setEditJobDescription] = useState<string | undefined>(jobDescription);
display: 'flex', // Processing state to show loading indicators
flexDirection: 'column', const [processing, setProcessing] = useState<string | undefined>(undefined);
flexGrow: 1, // Theme and responsive design setup
overflow: 'hidden',
}}
>
{
title !== "" &&
<Typography sx={{ pl: 1, pr: 1, display: 'flex', mt: -1, fontWeight: 'bold' }}>{title}</Typography>
}
<Box sx={{ display: 'flex', p: 1, flexGrow: 1, overflow: 'auto' }}>
{children}
</Box>
</Box>
);
const DocumentViewer: React.FC<DocumentViewerProps> = ({ generateResume, jobDescription, factCheck, resume, facts, sx }: DocumentViewerProps) => {
const [editJobDescription, setEditJobDescription] = useState<string>(jobDescription);
const [processing, setProcessing] = useState<boolean>(false);
const theme = useTheme(); const theme = useTheme();
const isMobile = useMediaQuery(theme.breakpoints.down('md')); const isMobile = useMediaQuery(theme.breakpoints.down('md'));
// State for controlling which document is active on mobile // State for controlling which document is active on mobile
const [activeDocMobile, setActiveDocMobile] = useState<number>(0); const [activeTab, setActiveTab] = useState<number>(0);
// State for controlling split ratio on desktop // State for controlling split ratio on desktop
const [splitRatio, setSplitRatio] = useState<number>(50); const [splitRatio, setSplitRatio] = useState<number>(100);
/**
* Reset processing state when resume is generated
*/
useEffect(() => { useEffect(() => {
if (processing && resume !== undefined) { if (resume !== undefined && processing === "resume") {
setProcessing(false); setProcessing(undefined);
} }
}, [processing, resume, setProcessing]); }, [processing, resume]);
const triggerGeneration = useCallback((jobDescription: string) => { /**
setProcessing(true); * Reset processing state when facts is generated
setActiveDocMobile(1); */
useEffect(() => {
if (facts !== undefined && processing === "facts") {
setProcessing(undefined);
}
}, [processing, facts]);
/**
* Trigger resume generation and update UI state
*/
const triggerGeneration = useCallback((jobDescription: string | undefined) => {
if (jobDescription === undefined) {
setProcessing(undefined);
setResume(undefined);
setActiveTab(0);
return;
}
setProcessing("resume");
setTimeout(() => { setActiveTab(1); }, 250); // Switch to resume view on mobile
generateResume(jobDescription); generateResume(jobDescription);
}, [setProcessing, generateResume]); }, [generateResume, setProcessing, setActiveTab, setResume]);
/**
* Trigger fact check and update UI state
*/
const triggerFactCheck = useCallback((resume: string | undefined) => {
if (resume === undefined) {
setProcessing(undefined);
setResume(undefined);
setFacts(undefined);
setActiveTab(1);
return;
}
setProcessing("facts");
factCheck(resume);
setTimeout(() => { setActiveTab(2); }, 250); // Switch to fact check view on mobile
}, [factCheck, setResume, setProcessing, setActiveTab, setFacts]);
/**
* Switch to resume tab when resume becomes available
*/
useEffect(() => {
if (resume !== undefined) {
setTimeout(() => { setActiveTab(1); }, 250); // Switch to resume view on mobile
}
}, [resume]);
/**
* Switch to fact check tab when facts become available
*/
useEffect(() => { useEffect(() => {
if (facts !== undefined) { if (facts !== undefined) {
setActiveDocMobile(2); setTimeout(() => { setActiveTab(2); }, 250); // Switch to fact check view on mobile
} }
}, [facts, setActiveDocMobile]); }, [facts]);
// Handle tab change for mobile /**
* Handle tab change for mobile view
*/
const handleTabChange = (_event: React.SyntheticEvent, newValue: number): void => { const handleTabChange = (_event: React.SyntheticEvent, newValue: number): void => {
setActiveDocMobile(newValue); setActiveTab(newValue);
}; };
// Adjust split ratio /**
* Adjust split ratio for desktop view
*/
const handleSliderChange = (_event: Event, newValue: number | number[]): void => { const handleSliderChange = (_event: Event, newValue: number | number[]): void => {
setSplitRatio(newValue as number); setSplitRatio(newValue as number);
}; };
// Reset split ratio /**
* Reset split ratio to default
*/
const resetSplit = (): void => { const resetSplit = (): void => {
setSplitRatio(50); setSplitRatio(50);
}; };
const handleKeyPress = (event: any) => { /**
* Handle keyboard shortcuts
*/
const handleKeyPress = (event: React.KeyboardEvent): void => {
if (event.key === 'Enter' && event.ctrlKey) { if (event.key === 'Enter' && event.ctrlKey) {
triggerGeneration(editJobDescription); triggerGeneration(editJobDescription || "");
} }
}; };
// Mobile view const renderJobDescriptionView = () => {
const jobDescription = [];
if (resume === undefined && processing === undefined) {
jobDescription.push(
<Document key="jobDescription" sx={{ display: "flex", flexGrow: 1 }} title="">
<TextField
variant="outlined"
fullWidth
multiline
type="text"
sx={{
flex: 1,
flexGrow: 1,
maxHeight: '100%',
overflow: 'auto',
}}
value={editJobDescription}
onChange={(e) => setEditJobDescription(e.target.value)}
onKeyDown={handleKeyPress}
placeholder="Paste a job description, then click Generate..."
/>
</Document>
);
} else {
jobDescription.push(<MuiMarkdown key="jobDescription" >{editJobDescription}</MuiMarkdown>)
}
jobDescription.push(
<Box key="jobActions" sx={{ display: "flex", justifyContent: "center", flexDirection: "row" }}>
<IconButton
sx={{ display: "flex", margin: 'auto 0px' }}
size="large"
edge="start"
color="inherit"
disabled={processing !== undefined}
onClick={() => { setEditJobDescription(""); triggerGeneration(undefined); }}
>
<Tooltip title="Reset Job Description">
<ResetIcon />
</Tooltip>
</IconButton>
<Tooltip title="Generate">
<Button
sx={{ m: 1, gap: 1, flexGrow: 1 }}
variant="contained"
onClick={() => { triggerGeneration(editJobDescription); }}
>
Generate<SendIcon />
</Button>
</Tooltip>
</Box>
);
return jobDescription;
}
/**
* Renders the resume view with loading indicator
*/
const renderResumeView = () => (
<Box key="ResumeView" sx={{ display: "flex", flexDirection: "column", overflow: "auto", flexGrow: 1, flexBasis: 0 }}>
<Document sx={{ display: "flex", flexGrow: 1 }} title="">
{resume !== undefined && <Message message={resume} />}
</Document>
{processing === "resume" && (
<Box sx={{
display: "flex",
flexDirection: "column",
alignItems: "center",
justifyContent: "center",
mb: 1,
height: "10px"
}}>
<PropagateLoader
size="10px"
loading={true}
aria-label="Loading Spinner"
data-testid="loader"
/>
<Typography>Generating resume...</Typography>
</Box>
)}
<ResumeActionCard
resume={resume}
processing={processing}
triggerFactCheck={triggerFactCheck}
/>
</Box>
);
/**
* Renders the fact check view
*/
const renderFactCheckView = () => (
<Box key="FactView" sx={{ display: "flex", flexDirection: "column", overflow: "auto", flexGrow: 1, flexBasis: 0, p: 0 }}>
<Document sx={{ display: "flex", flexGrow: 1 }} title="">
{facts !== undefined && <Message message={facts} />}
</Document>
{processing === "facts" && (
<Box sx={{
display: "flex",
flexDirection: "column",
alignItems: "center",
justifyContent: "center",
mb: 1,
height: "10px"
}}>
<PropagateLoader
size="10px"
loading={true}
aria-label="Loading Spinner"
data-testid="loader"
/>
<Typography>Fact Checking resume...</Typography>
</Box>
)}
</Box>
);
// Render mobile view
if (isMobile) { if (isMobile) {
/**
* Gets the appropriate content based on active tab
*/
const getActiveMobileContent = () => {
switch (activeTab) {
case 0:
return renderJobDescriptionView();
case 1:
return renderResumeView();
case 2:
return renderFactCheckView();
default:
return renderJobDescriptionView();
}
};
return ( return (
<Box sx={{ display: 'flex', flexDirection: 'column', flexGrow: 1, ...sx }}> <Box sx={{ display: 'flex', flexDirection: 'column', flexGrow: 1, ...sx }}>
{/* Tabs */} {/* Tabs */}
<Tabs <Tabs
value={activeDocMobile} value={activeTab}
onChange={handleTabChange} onChange={handleTabChange}
variant="fullWidth" variant="fullWidth"
sx={{ bgcolor: 'background.paper' }} sx={{ bgcolor: 'background.paper' }}
> >
<Tab label="Job Description" /> <Tab label="Job Description" />
<Tab label="Resume" /> {(resume !== undefined || processing === "resume") && <Tab label="Resume" />}
{facts !== undefined && <Tab label="Fact Check" />} {(facts !== undefined || processing === "facts") && <Tab label="Fact Check" />}
</Tabs> </Tabs>
{/* Document display area */} {/* Document display area */}
<Box sx={{ display: 'flex', flexDirection: 'column', flexGrow: 1, overflow: 'hidden', p: 2 }}> <Box sx={{ display: 'flex', flexDirection: 'column', flexGrow: 1, overflow: 'hidden', p: 0 }}>
{activeDocMobile === 0 ? (<> {getActiveMobileContent()}
<Document title="">
<TextField
variant="outlined"
fullWidth
multiline
type="text"
sx={{
flex: 1, // Makes the TextField fill the parent height
flexGrow: 1,
maxHeight: '100%', // Prevents it from growing larger than the parent height
overflow: 'auto', // Enables scrollbars if the content overflows
}}
value={editJobDescription}
onChange={(e) => setEditJobDescription(e.target.value)}
onKeyDown={handleKeyPress}
// placeholder="Paste a job description (or URL that resolves to one), then click Generate..."
placeholder="Paste a job description, then click Generate..."
/>
</Document>
<Tooltip title="Generate">
<Button sx={{ m: 1, gap: 1 }} variant="contained" onClick={() => { triggerGeneration(editJobDescription); }}>Generate<SendIcon /></Button>
</Tooltip>
</>) : (activeDocMobile === 1 ? (<Box sx={{ display: "flex", flexDirection: "column", overflow: "auto" }}>
<Document title="">{resume !== undefined && <Message message={resume} />}</Document>
{processing === true && <>
<Box sx={{
display: "flex",
flexDirection: "column",
alignItems: "center",
justifyContent: "center",
mb: 1,
height: "10px"
}}>
<PropagateLoader
size="10px"
loading={processing}
aria-label="Loading Spinner"
data-testid="loader"
/>
<Typography>Generating resume...</Typography>
</Box>
</>}
<Card sx={{ display: "flex", overflow: "auto", minHeight: "fit-content", p: 1, flexDirection: "column" }}>
{resume !== undefined || processing === true
? <>
<Typography><b>NOTE:</b> As with all LLMs, hallucination is always a possibility. If the generated resume seems too good to be true, <b>Fact Check</b> or, expand the <b>LLM information for this query</b> section (at the end of the resume) and click the links in the <b>Top RAG</b> matches to view the relavent RAG source document to read the details. Or go back to 'Backstory' and ask a question.</Typography> {processing === false && <Tooltip title="Fact Check">
<Button sx={{ m: 1, gap: 1 }} variant="contained" onClick={() => { resume && factCheck(resume.content); }}>Fact Check<SendIcon /></Button>
</Tooltip>}</>
: <Typography>Once you click <b>Generate</b> under the <b>Job Description</b>, a resume will be generated based on the user's RAG content and the job description.</Typography>
}
</Card>
</Box>) :
(<Box sx={{ display: "flex", flexDirection: "column", overflow: "auto" }}>
<Document title="">{facts !== undefined && <Message message={facts} />}</Document>
</Box>))}
</Box> </Box>
</Box> </Box>
); );
} }
// Desktop view /**
return ( * Gets the appropriate content based on active state for Desktop
<Box sx={{ display: 'flex', flexDirection: 'column', flexGrow: 1, ...sx }}> */
{/* Split document view */} const getActiveDesktopContent = () => {
<Box sx={{ display: 'flex', flexGrow: 1, overflow: 'hidden', p: 2 }}> /* Left panel - Job Description */
<Box sx={{ display: 'flex', flexDirection: 'column', width: `${splitRatio}%`, pr: 1, flexGrow: 1, overflow: 'hidden' }}> const showResume = resume !== undefined || processing === "resume"
<Document title="Job Description"> const showFactCheck = facts !== undefined || processing === "facts"
<TextField const otherRatio = showResume ? (100 - splitRatio / 2) : 100;
variant="outlined" const children = [];
fullWidth children.push(
type="text" <Box key="JobDescription" sx={{ display: 'flex', flexDirection: 'column', width: `${otherRatio}%`, p: 0, flexGrow: 1, overflow: 'hidden' }}>
multiline {renderJobDescriptionView()}
sx={{ </Box>);
flex: 1, // Makes the TextField fill the parent height
flexGrow: 1,
maxHeight: '100%', // Prevents it from growing larger than the parent height
overflow: 'auto', // Enables scrollbars if the content overflows
}}
value={editJobDescription}
onChange={(e) => setEditJobDescription(e.target.value)}
onKeyDown={handleKeyPress}
// placeholder="Paste a job description (or URL that resolves to one), then click Generate..."
placeholder="Paste a job description, then click Generate..."
/>
</Document>
<Tooltip title="Generate">
<Button sx={{ m: 1, gap: 1 }} variant="contained" onClick={() => { triggerGeneration(editJobDescription); }}>Generate<SendIcon /></Button>
</Tooltip>
</Box>
<Divider orientation="vertical" flexItem />
<Box sx={{ display: 'flex', width: `${100 - splitRatio}%`, pl: 1, flexGrow: 1, flexDirection: 'column' }}>
<Document title="">{resume !== undefined && <Message message={resume} />}</Document>
<Box sx={{
display: "flex",
flexDirection: "column",
alignItems: "center",
justifyContent: "center",
mb: 1
}}>
<PropagateLoader
size="10px"
loading={processing}
aria-label="Loading Spinner"
data-testid="loader"
/>
</Box>
<Card sx={{ display: "flex", overflow: "auto", minHeight: "fit-content", p: 1, flexDirection: "column" }}>
{resume !== undefined || processing === true
? <>
<Typography><b>NOTE:</b> As with all LLMs, hallucination is always a possibility. If the generated resume seems too good to be true, <b>Fact Check</b> or, expand the <b>LLM information for this query</b> section (at the end of the resume) and click the links in the <b>Top RAG</b> matches to view the relavent RAG source document to read the details. Or go back to 'Backstory' and ask a question.</Typography> { processing === false && <Tooltip title="Fact Check">
<Button sx={{ m: 1, gap: 1 }} variant="contained" onClick={() => { resume && factCheck(resume.content); }}>Fact Check<SendIcon /></Button>
</Tooltip>}</>
: <Typography>Once you click <b>Generate</b> under the <b>Job Description</b>, a resume will be generated based on the user's RAG content and the job description.</Typography>
}
</Card>
</Box>
{
facts !== undefined && <>
<Box sx={{ display: 'flex', width: `${100 - splitRatio}%`, pl: 1, flexGrow: 1, flexDirection: 'column' }}>
<Divider orientation="vertical" flexItem />
<Document title=""><Message message={facts} /></Document>
</Box>
</>}
</Box>
{/* Split control panel */} /* Resume panel - conditionally rendered if resume defined, or processing is in progress */
if (showResume) {
children.push(
<Box key="ResumeView" sx={{ display: 'flex', width: '100%', p: 0, flexGrow: 1, flexDirection: 'row' }}>
<Divider orientation="vertical" flexItem />
{renderResumeView()}
</Box>
);
}
<Paper sx={{ p: 2, display: 'flex', alignItems: 'center', justifyContent: 'center' }}> /* Fact Check panel - conditionally rendered if facts defined, or processing is in progress */
if (showFactCheck) {
children.push(
<Box key="FactCheckView" sx={{ display: 'flex', width: `${otherRatio}%`, p: 0, flexGrow: 1, flexDirection: 'row' }}>
<Divider orientation="vertical" flexItem />
{renderFactCheckView()}
</Box>
);
}
/* Split control panel - conditionally rendered if either facts or resume is set */
let slider = <Box key="slider"></Box>;
if (showResume || showFactCheck) {
slider = (
<Paper key="slider" sx={{ p: 2, display: 'flex', alignItems: 'center', justifyContent: 'center' }}>
<Stack direction="row" spacing={2} alignItems="center" sx={{ width: '60%' }}> <Stack direction="row" spacing={2} alignItems="center" sx={{ width: '60%' }}>
<IconButton onClick={() => setSplitRatio(Math.max(20, splitRatio - 10))}> <IconButton onClick={() => setSplitRatio(Math.max(0, splitRatio - 10))}>
<ChevronLeft /> <ChevronLeft />
</IconButton> </IconButton>
@ -268,11 +364,11 @@ const DocumentViewer: React.FC<DocumentViewerProps> = ({ generateResume, jobDesc
value={splitRatio} value={splitRatio}
onChange={handleSliderChange} onChange={handleSliderChange}
aria-label="Split ratio" aria-label="Split ratio"
min={20} min={0}
max={80} max={100}
/> />
<IconButton onClick={() => setSplitRatio(Math.min(80, splitRatio + 10))}> <IconButton onClick={() => setSplitRatio(Math.min(100, splitRatio + 10))}>
<ChevronRight /> <ChevronRight />
</IconButton> </IconButton>
@ -280,13 +376,82 @@ const DocumentViewer: React.FC<DocumentViewerProps> = ({ generateResume, jobDesc
<SwapHoriz /> <SwapHoriz />
</IconButton> </IconButton>
</Stack> </Stack>
</Paper> </Paper>
);
}
return (
<Box sx={{ display: 'flex', flexGrow: 1, flexDirection: 'column', overflow: 'hidden', p: 0 }}>
<Box sx={{ display: 'flex', flexGrow: 1, flexDirection: 'row', overflow: 'hidden', p: 0 }}>
{children}
</Box>
{slider}
</Box>
)
}
return (
<Box sx={{ display: 'flex', flexDirection: 'column', flexGrow: 1, ...sx }}>
{getActiveDesktopContent()}
</Box> </Box>
); );
}; };
export type {
DocumentViewerProps
};
export { DocumentViewer }; /**
* Props for the ResumeActionCard component
*/
interface ResumeActionCardProps {
resume: any;
processing: string | undefined;
triggerFactCheck: (resume: string | undefined) => void;
}
/**
* Action card displayed underneath the resume with notes and fact check button
*/
const ResumeActionCard: React.FC<ResumeActionCardProps> = ({ resume, processing, triggerFactCheck }) => (
<Box sx={{ display: "flex", justifyContent: "center", flexDirection: "column" }}>
<Card sx={{ display: "flex", overflow: "auto", minHeight: "fit-content", p: 1, flexDirection: "column" }}>
{resume !== undefined || processing === "resume" ? (
<Typography>
<b>NOTE:</b> As with all LLMs, hallucination is always a possibility. If the generated resume seems too good to be true, use <b>Fact Check</b>, or expand the <b>LLM information for this query</b> section (at the end of the resume) and click the links in the <b>Top RAG</b> matches to view the relevant RAG source documents and read the details. Or go back to 'Backstory' and ask a question.
</Typography>
) : (
<Typography>
Once you click <b>Generate</b> under the <b>Job Description</b>, a resume will be generated based on the user's RAG content and the job description.
</Typography>
)}
</Card>
<Box sx={{ display: "flex", justifyContent: "center", flexDirection: "row", flexGrow: 1 }}>
<IconButton
sx={{ display: "flex", margin: 'auto 0px' }}
size="large"
edge="start"
color="inherit"
disabled={processing === "resume"}
onClick={() => { triggerFactCheck(undefined); }}
>
<Tooltip title="Reset Resume">
<ResetIcon />
</Tooltip>
</IconButton>
<Tooltip title="Fact Check">
<span style={{ display: "flex", flexGrow: 1 }}>
<Button
sx={{ m: 1, gap: 1, flexGrow: 1 }}
variant="contained"
disabled={processing === "facts"}
onClick={() => { resume && triggerFactCheck(resume.content); }}
>
Fact Check<SendIcon />
</Button>
</span>
</Tooltip>
</Box>
</Box>
);
export {
DocumentViewer
};


@ -50,7 +50,7 @@ const Message = ({ message, submitQuery, isFullWidth }: MessageInterface) => {
const formattedContent = message.content.trim(); const formattedContent = message.content.trim();
return ( return (
<ChatBubble isFullWidth={isFullWidth} role={message.role} sx={{ flexGrow: 1, pb: message.metadata ? 0 : "8px", mb: 1, mt: 1 }}> <ChatBubble isFullWidth={isFullWidth} role={message.role} sx={{ flexGrow: 1, pb: message.metadata ? 0 : "8px", m: 0, mb: 1, mt: 1 }}>
<CardContent> <CardContent>
{message.role !== 'user' ? {message.role !== 'user' ?
<StyledMarkdown {...{ content: formattedContent, submitQuery }} /> <StyledMarkdown {...{ content: formattedContent, submitQuery }} />


@ -1,4 +1,4 @@
import { useState, useCallback, useEffect, } from 'react'; import { useState, useCallback, } from 'react';
import Box from '@mui/material/Box'; import Box from '@mui/material/Box';
import { SeverityType } from './Snack'; import { SeverityType } from './Snack';
import { ContextStatus } from './ContextStatus'; import { ContextStatus } from './ContextStatus';
@ -17,10 +17,11 @@ interface ResumeBuilderProps {
setResume: (resume: MessageData | undefined) => void, setResume: (resume: MessageData | undefined) => void,
facts: MessageData | undefined, facts: MessageData | undefined,
setFacts: (facts: MessageData | undefined) => void, setFacts: (facts: MessageData | undefined) => void,
jobDescription: string, jobDescription: string | undefined,
setJobDescription: (jobDescription: string | undefined) => void
}; };
const ResumeBuilder = ({ jobDescription, facts, setFacts, resume, setResume, setProcessing, processing, connectionBase, sessionId, setSnack }: ResumeBuilderProps) => { const ResumeBuilder = ({ jobDescription, setJobDescription, facts, setFacts, resume, setResume, setProcessing, processing, connectionBase, sessionId, setSnack }: ResumeBuilderProps) => {
const [lastEvalTPS, setLastEvalTPS] = useState<number>(35); const [lastEvalTPS, setLastEvalTPS] = useState<number>(35);
const [lastPromptTPS, setLastPromptTPS] = useState<number>(430); const [lastPromptTPS, setLastPromptTPS] = useState<number>(430);
const [contextStatus, setContextStatus] = useState<ContextStatus>({ context_used: 0, max_context: 0 }); const [contextStatus, setContextStatus] = useState<ContextStatus>({ context_used: 0, max_context: 0 });
@ -191,7 +192,6 @@ const ResumeBuilder = ({ jobDescription, facts, setFacts, resume, setResume, set
const factCheck = async (resume: string) => { const factCheck = async (resume: string) => {
if (!resume.trim()) return; if (!resume.trim()) return;
setFacts(undefined); setFacts(undefined);
setSnack('Fact Check is still under development', 'warning');
try { try {
setProcessing(true); setProcessing(true);
@ -294,20 +294,21 @@ const ResumeBuilder = ({ jobDescription, facts, setFacts, resume, setResume, set
return ( return (
<Box className="DocBox"> <Box className="DocBox">
<Box className="Conversation"> <Box className="Conversation" sx={{ p: 0, pt: 1 }}>
<DocumentViewer sx={{ <DocumentViewer sx={{
p: 0,
m: 0,
display: "flex", display: "flex",
flexGrow: 1, flexGrow: 1,
overflowY: "auto", overflowY: "auto",
flexDirection: "column", flexDirection: "column",
height: "calc(0vh - 0px)", // Hack to make the height work height: "calc(0vh - 0px)", // Hack to make the height work
}} {...{ factCheck, facts, jobDescription, generateResume, resume }} /> }} {...{ factCheck, facts, jobDescription, generateResume, resume, setFacts, setResume, setJobDescription }} />
</Box> </Box>
</Box> </Box>
); );
} }
export type { export type {
ResumeBuilderProps ResumeBuilderProps
}; };


@ -256,21 +256,7 @@ const VectorVisualizer: React.FC<VectorVisualizerProps> = ({ setSnack, connectio
</Typography> </Typography>
</Card> </Card>
<Box sx={{ display: 'flex', flexGrow: 1, justifyContent: 'center', alignItems: 'center' }}> <Box sx={{ display: 'flex', flexGrow: 1, justifyContent: 'center', alignItems: 'center' }}>
<Plot <Plot
onHover={(event: any) => {
const point = event.points[0];
console.log('Point:', point);
const type = point.customdata.type;
const text = point.customdata.doc;
const emoji = emojiMap[type] || '❓';
setTooltip({
visible: true,
background: point['marker.color'],
color: getTextColorForBackground(point['marker.color']),
content: `${emoji} ${type.toUpperCase()}\n${text}`,
});
}}
onClick={(event: any) => { onClick={(event: any) => {
const point = event.points[0]; const point = event.points[0];
console.log('Point:', point); console.log('Point:', point);


@ -20,14 +20,14 @@ def try_import(module_name, pip_name=None):
print(f" pip install {pip_name or module_name}") print(f" pip install {pip_name or module_name}")
# Third-party modules with import checks # Third-party modules with import checks
try_import('ollama') try_import("ollama")
try_import('requests') try_import("requests")
try_import('bs4', 'beautifulsoup4') try_import("bs4", "beautifulsoup4")
try_import('fastapi') try_import("fastapi")
try_import('uvicorn') try_import("uvicorn")
try_import('numpy') try_import("numpy")
try_import('umap') try_import("umap")
try_import('sklearn') try_import("sklearn")
import ollama import ollama
import requests import requests
@ -59,9 +59,9 @@ rags = [
def get_installed_ram(): def get_installed_ram():
try: try:
with open('/proc/meminfo', 'r') as f: with open("/proc/meminfo", "r") as f:
meminfo = f.read() meminfo = f.read()
match = re.search(r'MemTotal:\s+(\d+)', meminfo) match = re.search(r"MemTotal:\s+(\d+)", meminfo)
if match: if match:
return f"{math.floor(int(match.group(1)) / 1000**2)}GB" # Convert KB to GB return f"{math.floor(int(match.group(1)) / 1000**2)}GB" # Convert KB to GB
except Exception as e: except Exception as e:
@ -71,12 +71,12 @@ def get_graphics_cards():
gpus = [] gpus = []
try: try:
# Run the ze-monitor utility # Run the ze-monitor utility
result = subprocess.run(['ze-monitor'], capture_output=True, text=True, check=True) result = subprocess.run(["ze-monitor"], capture_output=True, text=True, check=True)
# Clean up the output (remove leading/trailing whitespace and newlines) # Clean up the output (remove leading/trailing whitespace and newlines)
output = result.stdout.strip() output = result.stdout.strip()
for index in range(len(output.splitlines())): for index in range(len(output.splitlines())):
result = subprocess.run(['ze-monitor', '--device', f'{index+1}', '--info'], capture_output=True, text=True, check=True) result = subprocess.run(["ze-monitor", "--device", f"{index+1}", "--info"], capture_output=True, text=True, check=True)
gpu_info = result.stdout.strip().splitlines() gpu_info = result.stdout.strip().splitlines()
gpu = { gpu = {
"discrete": True, # Assume it's discrete initially "discrete": True, # Assume it's discrete initially
@ -85,17 +85,17 @@ def get_graphics_cards():
} }
gpus.append(gpu) gpus.append(gpu)
for line in gpu_info: for line in gpu_info:
match = re.match(r'^Device: [^(]*\((.*)\)', line) match = re.match(r"^Device: [^(]*\((.*)\)", line)
if match: if match:
gpu["name"] = match.group(1) gpu["name"] = match.group(1)
continue continue
match = re.match(r'^\s*Memory: (.*)', line) match = re.match(r"^\s*Memory: (.*)", line)
if match: if match:
gpu["memory"] = match.group(1) gpu["memory"] = match.group(1)
continue continue
match = re.match(r'^.*Is integrated with host: Yes.*', line) match = re.match(r"^.*Is integrated with host: Yes.*", line)
if match: if match:
gpu["discrete"] = False gpu["discrete"] = False
continue continue
@ -106,10 +106,10 @@ def get_graphics_cards():
def get_cpu_info(): def get_cpu_info():
try: try:
with open('/proc/cpuinfo', 'r') as f: with open("/proc/cpuinfo", "r") as f:
cpuinfo = f.read() cpuinfo = f.read()
model_match = re.search(r'model name\s+:\s+(.+)', cpuinfo) model_match = re.search(r"model name\s+:\s+(.+)", cpuinfo)
cores_match = re.findall(r'processor\s+:\s+\d+', cpuinfo) cores_match = re.findall(r"processor\s+:\s+\d+", cpuinfo)
if model_match and cores_match: if model_match and cores_match:
return f"{model_match.group(1)} with {len(cores_match)} cores" return f"{model_match.group(1)} with {len(cores_match)} cores"
except Exception as e: except Exception as e:
@ -200,8 +200,8 @@ def parse_args():
parser.add_argument("--ollama-model", type=str, default=MODEL_NAME, help=f"LLM model to use. default={MODEL_NAME}") parser.add_argument("--ollama-model", type=str, default=MODEL_NAME, help=f"LLM model to use. default={MODEL_NAME}")
parser.add_argument("--web-host", type=str, default=WEB_HOST, help=f"Host to launch Flask web server. default={WEB_HOST} only if --web-disable not specified.") parser.add_argument("--web-host", type=str, default=WEB_HOST, help=f"Host to launch Flask web server. default={WEB_HOST} only if --web-disable not specified.")
parser.add_argument("--web-port", type=str, default=WEB_PORT, help=f"Port to launch Flask web server. default={WEB_PORT} only if --web-disable not specified.") parser.add_argument("--web-port", type=str, default=WEB_PORT, help=f"Port to launch Flask web server. default={WEB_PORT} only if --web-disable not specified.")
parser.add_argument('--level', type=str, choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], parser.add_argument("--level", type=str, choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
default=LOG_LEVEL, help=f'Set the logging level. default={LOG_LEVEL}') default=LOG_LEVEL, help=f"Set the logging level. default={LOG_LEVEL}")
return parser.parse_args() return parser.parse_args()
def setup_logging(level): def setup_logging(level):
@ -209,7 +209,7 @@ def setup_logging(level):
if not isinstance(numeric_level, int): if not isinstance(numeric_level, int):
raise ValueError(f"Invalid log level: {level}") raise ValueError(f"Invalid log level: {level}")
logging.basicConfig(level=numeric_level, format='%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s') logging.basicConfig(level=numeric_level, format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s")
logging.info(f"Logging is set to {level} level.") logging.info(f"Logging is set to {level} level.")
@ -230,26 +230,26 @@ async def AnalyzeSite(url, question):
try: try:
# Fetch the webpage # Fetch the webpage
headers = { headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36' "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
} }
logging.info(f"Fetching {url}") logging.info(f"Fetching {url}")
response = requests.get(url, headers=headers, timeout=10) response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status() response.raise_for_status()
logging.info(f"{url} returned. Processing...") logging.info(f"{url} returned. Processing...")
# Parse the HTML # Parse the HTML
soup = BeautifulSoup(response.text, 'html.parser') soup = BeautifulSoup(response.text, "html.parser")
# Remove script and style elements # Remove script and style elements
for script in soup(["script", "style"]): for script in soup(["script", "style"]):
script.extract() script.extract()
# Get text content # Get text content
text = soup.get_text(separator=' ', strip=True) text = soup.get_text(separator=" ", strip=True)
# Clean up text (remove extra whitespace) # Clean up text (remove extra whitespace)
lines = (line.strip() for line in text.splitlines()) lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
text = ' '.join(chunk for chunk in chunks if chunk) text = " ".join(chunk for chunk in chunks if chunk)
# Limit text length if needed (Ollama may have token limits) # Limit text length if needed (Ollama may have token limits)
max_chars = 100000 max_chars = 100000
@ -265,12 +265,12 @@ async def AnalyzeSite(url, question):
system="You are given the contents of {url}. Answer the question about the contents", system="You are given the contents of {url}. Answer the question about the contents",
prompt=prompt) prompt=prompt)
#logging.info(response['response']) #logging.info(response["response"])
return { return {
'source': 'summarizer-llm', "source": "summarizer-llm",
'content': response['response'], "content": response["response"],
'metadata': DateTime() "metadata": DateTime()
} }
except requests.exceptions.RequestException as e: except requests.exceptions.RequestException as e:
@ -306,40 +306,40 @@ async def handle_tool_calls(message):
tools_used = [] tools_used = []
all_responses = [] all_responses = []
for i, tool_call in enumerate(message['tool_calls']): for i, tool_call in enumerate(message["tool_calls"]):
arguments = tool_call['function']['arguments'] arguments = tool_call["function"]["arguments"]
tool = tool_call['function']['name'] tool = tool_call["function"]["name"]
# Yield status update before processing each tool # Yield status update before processing each tool
yield {"status": "processing", "message": f"Processing tool {i+1}/{len(message['tool_calls'])}: {tool}..."} yield {"status": "processing", "message": f"Processing tool {i+1}/{len(message['tool_call'])}: {tool}..."}
# Process the tool based on its type # Process the tool based on its type
match tool: match tool:
case 'TickerValue': case "TickerValue":
ticker = arguments.get('ticker') ticker = arguments.get("ticker")
if not ticker: if not ticker:
ret = None ret = None
else: else:
ret = TickerValue(ticker) ret = TickerValue(ticker)
tools_used.append({ "tool": f"{tool}({ticker})", "result": ret}) tools_used.append({ "tool": f"{tool}({ticker})", "result": ret})
case 'AnalyzeSite': case "AnalyzeSite":
url = arguments.get('url') url = arguments.get("url")
question = arguments.get('question', 'what is the summary of this content?') question = arguments.get("question", "what is the summary of this content?")
# Additional status update for long-running operations # Additional status update for long-running operations
yield {"status": "processing", "message": f"Retrieving and summarizing content from {url}..."} yield {"status": "processing", "message": f"Retrieving and summarizing content from {url}..."}
ret = await AnalyzeSite(url, question) ret = await AnalyzeSite(url, question)
tools_used.append({ "tool": f"{tool}('{url}', '{question}')", "result": ret }) tools_used.append({ "tool": f"{tool}('{url}', '{question}')", "result": ret })
case 'DateTime': case "DateTime":
tz = arguments.get('timezone') tz = arguments.get("timezone")
ret = DateTime(tz) ret = DateTime(tz)
tools_used.append({ "tool": f"{tool}('{tz}')", "result": ret }) tools_used.append({ "tool": f"{tool}('{tz}')", "result": ret })
case 'WeatherForecast': case "WeatherForecast":
city = arguments.get('city') city = arguments.get("city")
state = arguments.get('state') state = arguments.get("state")
yield {"status": "processing", "message": f"Fetching weather data for {city}, {state}..."} yield {"status": "processing", "message": f"Fetching weather data for {city}, {state}..."}
ret = WeatherForecast(city, state) ret = WeatherForecast(city, state)
@ -352,7 +352,7 @@ async def handle_tool_calls(message):
tool_response = { tool_response = {
"role": "tool", "role": "tool",
"content": str(ret), "content": str(ret),
"name": tool_call['function']['name'] "name": tool_call["function"]["name"]
} }
all_responses.append(tool_response) all_responses.append(tool_response)
@ -401,7 +401,7 @@ class WebServer:
self.setup_routes() self.setup_routes()
def setup_routes(self): def setup_routes(self):
@self.app.get('/') @self.app.get("/")
async def root(): async def root():
context = self.create_context() context = self.create_context()
self.logging.info(f"Redirecting non-session to {context['id']}") self.logging.info(f"Redirecting non-session to {context['id']}")
@ -474,7 +474,7 @@ class WebServer:
# "document_count": file_watcher.collection.count() # "document_count": file_watcher.collection.count()
# } # }
@self.app.put('/api/umap/{context_id}') @self.app.put("/api/umap/{context_id}")
async def put_umap(context_id: str, request: Request): async def put_umap(context_id: str, request: Request):
if not self.file_watcher: if not self.file_watcher:
return return
@ -487,24 +487,24 @@ class WebServer:
try: try:
data = await request.json() data = await request.json()
dimensions = data.get('dimensions', 2) dimensions = data.get("dimensions", 2)
except: except:
dimensions = 2 dimensions = 2
try: try:
result = self.file_watcher.collection.get(include=['embeddings', 'documents', 'metadatas']) result = self.file_watcher.collection.get(include=["embeddings", "documents", "metadatas"])
vectors = np.array(result['embeddings']) vectors = np.array(result["embeddings"])
umap_model = umap.UMAP(n_components=dimensions, random_state=42) #, n_neighbors=15, min_dist=0.1) umap_model = umap.UMAP(n_components=dimensions, random_state=42) #, n_neighbors=15, min_dist=0.1)
embedding = umap_model.fit_transform(vectors) embedding = umap_model.fit_transform(vectors)
context['umap_model'] = umap_model context["umap_model"] = umap_model
result['embeddings'] = embedding.tolist() result["embeddings"] = embedding.tolist()
return JSONResponse(result) return JSONResponse(result)
except Exception as e: except Exception as e:
logging.error(e) logging.error(e)
return JSONResponse({"error": str(e)}, 500) return JSONResponse({"error": str(e)}, 500)
@self.app.put('/api/similarity/{context_id}') @self.app.put("/api/similarity/{context_id}")
async def put_similarity(context_id: str, request: Request): async def put_similarity(context_id: str, request: Request):
if not self.file_watcher: if not self.file_watcher:
return return
@ -519,9 +519,9 @@ class WebServer:
try: try:
data = await request.json() data = await request.json()
query = data.get('query', '') query = data.get("query", "")
except: except:
query = '' query = ""
if not query: if not query:
return JSONResponse({"error": "No query provided"}, status_code=400) return JSONResponse({"error": "No query provided"}, status_code=400)
@ -537,7 +537,7 @@ class WebServer:
logging.error(e) logging.error(e)
#return JSONResponse({"error": str(e)}, 500) #return JSONResponse({"error": str(e)}, 500)
@self.app.put('/api/reset/{context_id}') @self.app.put("/api/reset/{context_id}")
async def put_reset(context_id: str, request: Request): async def put_reset(context_id: str, request: Request):
if not is_valid_uuid(context_id): if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}") logging.warning(f"Invalid context_id: {context_id}")
@ -576,7 +576,7 @@ class WebServer:
except: except:
return JSONResponse({ "error": "Usage: { reset: rags|tools|history|system-prompt}"}) return JSONResponse({ "error": "Usage: { reset: rags|tools|history|system-prompt}"})
@self.app.put('/api/tunables/{context_id}') @self.app.put("/api/tunables/{context_id}")
async def put_tunables(context_id: str, request: Request): async def put_tunables(context_id: str, request: Request):
if not is_valid_uuid(context_id): if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}") logging.warning(f"Invalid context_id: {context_id}")
@ -600,7 +600,7 @@ class WebServer:
case _: case _:
return JSONResponse({ "error": f"Unrecognized tunable {k}"}, 404) return JSONResponse({ "error": f"Unrecognized tunable {k}"}, 404)
@self.app.get('/api/tunables/{context_id}') @self.app.get("/api/tunables/{context_id}")
async def get_tunables(context_id: str): async def get_tunables(context_id: str):
if not is_valid_uuid(context_id): if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}") logging.warning(f"Invalid context_id: {context_id}")
@ -611,7 +611,7 @@ class WebServer:
"message-history-length": context["message_history_length"] "message-history-length": context["message_history_length"]
}) })
@self.app.get('/api/resume/{context_id}') @self.app.get("/api/resume/{context_id}")
async def get_resume(context_id: str): async def get_resume(context_id: str):
if not is_valid_uuid(context_id): if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}") logging.warning(f"Invalid context_id: {context_id}")
@ -619,11 +619,11 @@ class WebServer:
context = self.upsert_context(context_id) context = self.upsert_context(context_id)
return JSONResponse(context["resume_history"]) return JSONResponse(context["resume_history"])
@self.app.get('/api/system-info/{context_id}') @self.app.get("/api/system-info/{context_id}")
async def get_system_info(context_id: str): async def get_system_info(context_id: str):
return JSONResponse(system_info(self.model)) return JSONResponse(system_info(self.model))
@self.app.post('/api/chat/{context_id}') @self.app.post("/api/chat/{context_id}")
async def chat_endpoint(context_id: str, request: Request): async def chat_endpoint(context_id: str, request: Request):
if not is_valid_uuid(context_id): if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}") logging.warning(f"Invalid context_id: {context_id}")
@ -633,7 +633,7 @@ class WebServer:
# Create a custom generator that ensures flushing # Create a custom generator that ensures flushing
async def flush_generator(): async def flush_generator():
async for message in self.chat(context=context, content=data['content']): async for message in self.chat(context=context, content=data["content"]):
# Convert to JSON and add newline # Convert to JSON and add newline
yield json.dumps(message) + "\n" yield json.dumps(message) + "\n"
# Save the history as its generated # Save the history as its generated
@ -652,7 +652,7 @@ class WebServer:
} }
) )
@self.app.post('/api/generate-resume/{context_id}') @self.app.post("/api/generate-resume/{context_id}")
async def post_generate_resume(context_id: str, request: Request): async def post_generate_resume(context_id: str, request: Request):
if not is_valid_uuid(context_id): if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}") logging.warning(f"Invalid context_id: {context_id}")
@ -662,7 +662,7 @@ class WebServer:
# Create a custom generator that ensures flushing # Create a custom generator that ensures flushing
async def flush_generator(): async def flush_generator():
async for message in self.generate_resume(context=context, content=data['content']): async for message in self.generate_resume(context=context, content=data["content"]):
# Convert to JSON and add newline # Convert to JSON and add newline
yield json.dumps(message) + "\n" yield json.dumps(message) + "\n"
# Save the history as its generated # Save the history as its generated
@ -681,7 +681,7 @@ class WebServer:
} }
) )
@self.app.post('/api/fact-check/{context_id}') @self.app.post("/api/fact-check/{context_id}")
async def post_fact_check(context_id: str, request: Request): async def post_fact_check(context_id: str, request: Request):
if not is_valid_uuid(context_id): if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}") logging.warning(f"Invalid context_id: {context_id}")
@ -691,7 +691,7 @@ class WebServer:
# Create a custom generator that ensures flushing # Create a custom generator that ensures flushing
async def flush_generator(): async def flush_generator():
async for message in self.fact_check(context=context, content=data['content']): async for message in self.fact_check(context=context, content=data["content"]):
# Convert to JSON and add newline # Convert to JSON and add newline
yield json.dumps(message) + "\n" yield json.dumps(message) + "\n"
# Save the history as its generated # Save the history as its generated
@ -706,27 +706,27 @@ class WebServer:
headers={ headers={
"Cache-Control": "no-cache", "Cache-Control": "no-cache",
"Connection": "keep-alive", "Connection": "keep-alive",
"X-Accel-Buffering": "no" # Prevents Nginx buffering if you're using it "X-Accel-Buffering": "no" # Prevents Nginx buffering if you"re using it
} }
) )
@self.app.post('/api/context') @self.app.post("/api/context")
async def create_context(): async def create_context():
context = self.create_context() context = self.create_context()
self.logging.info(f"Generated new session as {context['id']}") self.logging.info(f"Generated new session as {context['id']}")
return JSONResponse(context) return JSONResponse(context)
@self.app.get('/api/history/{context_id}') @self.app.get("/api/history/{context_id}")
async def get_history(context_id: str): async def get_history(context_id: str):
context = self.upsert_context(context_id) context = self.upsert_context(context_id)
return JSONResponse(context["user_history"]) return JSONResponse(context["user_history"])
@self.app.get('/api/tools/{context_id}') @self.app.get("/api/tools/{context_id}")
async def get_tools(context_id: str): async def get_tools(context_id: str):
context = self.upsert_context(context_id) context = self.upsert_context(context_id)
return JSONResponse(context["tools"]) return JSONResponse(context["tools"])
@self.app.put('/api/tools/{context_id}') @self.app.put("/api/tools/{context_id}")
async def put_tools(context_id: str, request: Request): async def put_tools(context_id: str, request: Request):
if not is_valid_uuid(context_id): if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}") logging.warning(f"Invalid context_id: {context_id}")
@ -745,12 +745,12 @@ class WebServer:
except: except:
return JSONResponse({ "status": "error" }), 405 return JSONResponse({ "status": "error" }), 405
@self.app.get('/api/rags/{context_id}') @self.app.get("/api/rags/{context_id}")
async def get_rags(context_id: str): async def get_rags(context_id: str):
context = self.upsert_context(context_id) context = self.upsert_context(context_id)
return JSONResponse(context["rags"]) return JSONResponse(context["rags"])
@self.app.put('/api/rags/{context_id}') @self.app.put("/api/rags/{context_id}")
async def put_rags(context_id: str, request: Request): async def put_rags(context_id: str, request: Request):
if not is_valid_uuid(context_id): if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}") logging.warning(f"Invalid context_id: {context_id}")
@ -769,7 +769,7 @@ class WebServer:
except: except:
return JSONResponse({ "status": "error" }), 405 return JSONResponse({ "status": "error" }), 405
@self.app.get('/api/context-status/{context_id}') @self.app.get("/api/context-status/{context_id}")
async def get_context_status(context_id): async def get_context_status(context_id):
if not is_valid_uuid(context_id): if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}") logging.warning(f"Invalid context_id: {context_id}")
@ -777,18 +777,18 @@ class WebServer:
context = self.upsert_context(context_id) context = self.upsert_context(context_id)
return JSONResponse({"context_used": context["context_tokens"], "max_context": defines.max_context}) return JSONResponse({"context_used": context["context_tokens"], "max_context": defines.max_context})
@self.app.get('/api/health') @self.app.get("/api/health")
async def health_check(): async def health_check():
return JSONResponse({"status": "healthy"}) return JSONResponse({"status": "healthy"})
@self.app.get('/{path:path}') @self.app.get("/{path:path}")
async def serve_static(path: str): async def serve_static(path: str):
full_path = os.path.join(defines.static_content, path) full_path = os.path.join(defines.static_content, path)
if os.path.exists(full_path) and os.path.isfile(full_path): if os.path.exists(full_path) and os.path.isfile(full_path):
self.logging.info(f"Serve static request for {full_path}") self.logging.info(f"Serve static request for {full_path}")
return FileResponse(full_path) return FileResponse(full_path)
self.logging.info(f"Serve index.html for {path}") self.logging.info(f"Serve index.html for {path}")
return FileResponse(os.path.join(defines.static_content, 'index.html')) return FileResponse(os.path.join(defines.static_content, "index.html"))
def save_context(self, session_id): def save_context(self, session_id):
""" """
@ -814,7 +814,7 @@ class WebServer:
if umap_model: if umap_model:
del context["umap_model"] del context["umap_model"]
# Serialize the data to JSON and write to file # Serialize the data to JSON and write to file
with open(file_path, 'w') as f: with open(file_path, "w") as f:
json.dump(context, f) json.dump(context, f)
if umap_model: if umap_model:
context["umap_model"] = umap_model context["umap_model"] = umap_model
@ -837,7 +837,7 @@ class WebServer:
return self.create_context(session_id) return self.create_context(session_id)
# Read and deserialize the data # Read and deserialize the data
with open(file_path, 'r') as f: with open(file_path, "r") as f:
self.contexts[session_id] = json.load(f) self.contexts[session_id] = json.load(f)
return self.contexts[session_id] return self.contexts[session_id]
@ -934,21 +934,21 @@ class WebServer:
yield {"status": "processing", "message": "Processing request...", "num_ctx": ctx_size} yield {"status": "processing", "message": "Processing request...", "num_ctx": ctx_size}
# Use the async generator in an async for loop # Use the async generator in an async for loop
response = self.client.chat(model=self.model, messages=messages, tools=llm_tools(context["tools"]), options={ 'num_ctx': ctx_size }) response = self.client.chat(model=self.model, messages=messages, tools=llm_tools(context["tools"]), options={ "num_ctx": ctx_size })
metadata["eval_count"] += response['eval_count'] metadata["eval_count"] += response["eval_count"]
metadata["eval_duration"] += response['eval_duration'] metadata["eval_duration"] += response["eval_duration"]
metadata["prompt_eval_count"] += response['prompt_eval_count'] metadata["prompt_eval_count"] += response["prompt_eval_count"]
metadata["prompt_eval_duration"] += response['prompt_eval_duration'] metadata["prompt_eval_duration"] += response["prompt_eval_duration"]
context["context_tokens"] = response['prompt_eval_count'] + response['eval_count'] context["context_tokens"] = response["prompt_eval_count"] + response["eval_count"]
tools_used = [] tools_used = []
yield {"status": "processing", "message": "Initial response received..."} yield {"status": "processing", "message": "Initial response received..."}
if 'tool_calls' in response.get('message', {}): if "tool_calls" in response.get("message", {}):
yield {"status": "processing", "message": "Processing tool calls..."} yield {"status": "processing", "message": "Processing tool calls..."}
message = response['message'] message = response["message"]
tool_result = None tool_result = None
# Process all yielded items from the handler # Process all yielded items from the handler
@ -961,14 +961,14 @@ class WebServer:
yield item yield item
message_dict = { message_dict = {
'role': message.get('role', 'assistant'), "role": message.get("role", "assistant"),
'content': message.get('content', '') "content": message.get("content", "")
} }
if 'tool_calls' in message: if "tool_calls" in message:
message_dict['tool_calls'] = [ message_dict["tool_calls"] = [
{'function': {'name': tc['function']['name'], 'arguments': tc['function']['arguments']}} {"function": {"name": tc["function"]["name"], "arguments": tc["function"]["arguments"]}}
for tc in message['tool_calls'] for tc in message["tool_calls"]
] ]
pre_add_index = len(messages) pre_add_index = len(messages)
@@ -985,14 +985,14 @@ class WebServer:
ctx_size = self.get_optimal_ctx_size(context["context_tokens"], messages=messages[pre_add_index:]) ctx_size = self.get_optimal_ctx_size(context["context_tokens"], messages=messages[pre_add_index:])
yield {"status": "processing", "message": "Generating final response...", "num_ctx": ctx_size } yield {"status": "processing", "message": "Generating final response...", "num_ctx": ctx_size }
# Decrease creativity when processing tool call requests # Decrease creativity when processing tool call requests
response = self.client.chat(model=self.model, messages=messages, stream=False, options={ 'num_ctx': ctx_size }) #, "temperature": 0.5 }) response = self.client.chat(model=self.model, messages=messages, stream=False, options={ "num_ctx": ctx_size }) #, "temperature": 0.5 })
metadata["eval_count"] += response['eval_count'] metadata["eval_count"] += response["eval_count"]
metadata["eval_duration"] += response['eval_duration'] metadata["eval_duration"] += response["eval_duration"]
metadata["prompt_eval_count"] += response['prompt_eval_count'] metadata["prompt_eval_count"] += response["prompt_eval_count"]
metadata["prompt_eval_duration"] += response['prompt_eval_duration'] metadata["prompt_eval_duration"] += response["prompt_eval_duration"]
context["context_tokens"] = response['prompt_eval_count'] + response['eval_count'] context["context_tokens"] = response["prompt_eval_count"] + response["eval_count"]
reply = response['message']['content'] reply = response["message"]["content"]
final_message = {"role": "assistant", "content": reply } final_message = {"role": "assistant", "content": reply }
# history is provided to the LLM and should not have additional metadata # history is provided to the LLM and should not have additional metadata
@@ -1006,7 +1006,7 @@ class WebServer:
yield {"status": "done", "message": final_message } yield {"status": "done", "message": final_message }
except Exception as e: except Exception as e:
logging.exception({ 'model': self.model, 'messages': messages, 'error': str(e) }) logging.exception({ "model": self.model, "messages": messages, "error": str(e) })
yield {"status": "error", "message": f"An error occurred: {str(e)}"} yield {"status": "error", "message": f"An error occurred: {str(e)}"}
finally: finally:
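Each handler above streams progress as plain dicts with a "status" of "processing", "done", or "error" and a "message" payload. A minimal sketch of how a caller might consume such a stream; the generator's name and its async nature are assumptions for illustration, while the status and message keys come from the code above. This only shows the shape of the protocol the handlers emit.

    async def consume(status_stream):
        async for update in status_stream:
            if update["status"] == "processing":
                print("progress:", update["message"])
            elif update["status"] == "done":
                return update["message"]  # final assistant message dict
            elif update["status"] == "error":
                raise RuntimeError(update["message"])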
@@ -1032,7 +1032,7 @@ class WebServer:
"resume": "", "resume": "",
"metadata": {}, "metadata": {},
"rag": "", "rag": "",
"fact_check": "" "fact_check": {}
} }
metadata = { metadata = {
@@ -1044,7 +1044,7 @@ class WebServer:
"prompt_eval_duration": 0, "prompt_eval_duration": 0,
} }
rag_docs = [] rag_docs = []
resume_doc = open(defines.resume_doc, 'r').read() resume_doc = open(defines.resume_doc, "r").read()
rag_docs.append(resume_doc) rag_docs.append(resume_doc)
for rag in context["rags"]: for rag in context["rags"]:
if rag["enabled"] and rag["name"] == "JPK": # Only support JPK rag right now... if rag["enabled"] and rag["name"] == "JPK": # Only support JPK rag right now...
@@ -1076,24 +1076,24 @@ class WebServer:
# 2. If not requested (no tool call), abort the path # 2. If not requested (no tool call), abort the path
# 3. Otherwise, we know the URL was good and can use that URL's fetched content as context. # 3. Otherwise, we know the URL was good and can use that URL's fetched content as context.
# #
response = self.client.generate(model=self.model, system=system_generate_resume, prompt=content, options={ 'num_ctx': ctx_size }) response = self.client.generate(model=self.model, system=system_generate_resume, prompt=content, options={ "num_ctx": ctx_size })
metadata["eval_count"] += response['eval_count'] metadata["eval_count"] += response["eval_count"]
metadata["eval_duration"] += response['eval_duration'] metadata["eval_duration"] += response["eval_duration"]
metadata["prompt_eval_count"] += response['prompt_eval_count'] metadata["prompt_eval_count"] += response["prompt_eval_count"]
metadata["prompt_eval_duration"] += response['prompt_eval_duration'] metadata["prompt_eval_duration"] += response["prompt_eval_duration"]
context["context_tokens"] = response['prompt_eval_count'] + response['eval_count'] context["context_tokens"] = response["prompt_eval_count"] + response["eval_count"]
reply = response['response'] reply = response["response"]
final_message = {"role": "assistant", "content": reply, "metadata": metadata } final_message = {"role": "assistant", "content": reply, "metadata": metadata }
resume['resume'] = final_message resume["resume"] = final_message
resume_history.append(resume) resume_history.append(resume)
# Return the REST API with metadata # Return the REST API with metadata
yield {"status": "done", "message": final_message } yield {"status": "done", "message": final_message }
except Exception as e: except Exception as e:
logging.exception({ 'model': self.model, 'content': content, 'error': str(e) }) logging.exception({ "model": self.model, "content": content, "error": str(e) })
yield {"status": "error", "message": f"An error occurred: {str(e)}"} yield {"status": "error", "message": f"An error occurred: {str(e)}"}
finally: finally:
@@ -1128,29 +1128,29 @@ class WebServer:
# Estimate token length of new messages # Estimate token length of new messages
ctx_size = self.get_optimal_ctx_size(context["context_tokens"], messages=[system_fact_check, content]) ctx_size = self.get_optimal_ctx_size(context["context_tokens"], messages=[system_fact_check, content])
yield {"status": "processing", "message": "Processing request...", "num_ctx": ctx_size} yield {"status": "processing", "message": "Processing request...", "num_ctx": ctx_size}
response = self.client.generate(model=self.model, system=system_fact_check, prompt=content, options={ 'num_ctx': ctx_size }) response = self.client.generate(model=self.model, system=system_fact_check, prompt=content, options={ "num_ctx": ctx_size })
logging.info(f"Fact checking {ctx_size} tokens.") logging.info(f"Fact checking {ctx_size} tokens.")
metadata["eval_count"] += response['eval_count'] metadata["eval_count"] += response["eval_count"]
metadata["eval_duration"] += response['eval_duration'] metadata["eval_duration"] += response["eval_duration"]
metadata["prompt_eval_count"] += response['prompt_eval_count'] metadata["prompt_eval_count"] += response["prompt_eval_count"]
metadata["prompt_eval_duration"] += response['prompt_eval_duration'] metadata["prompt_eval_duration"] += response["prompt_eval_duration"]
context["context_tokens"] = response['prompt_eval_count'] + response['eval_count'] context["context_tokens"] = response["prompt_eval_count"] + response["eval_count"]
reply = response['response'] reply = response["response"]
final_message = {"role": "assistant", "content": reply, "metadata": metadata } final_message = {"role": "assistant", "content": reply, "metadata": metadata }
resume['fact_check'] = final_message resume["fact_check"] = final_message
# Return the REST API with metadata # Return the REST API with metadata
yield {"status": "done", "message": final_message } yield {"status": "done", "message": final_message }
except Exception as e: except Exception as e:
logging.exception({ 'model': self.model, 'content': content, 'error': str(e) }) logging.exception({ "model": self.model, "content": content, "error": str(e) })
yield {"status": "error", "message": f"An error occurred: {str(e)}"} yield {"status": "error", "message": f"An error occurred: {str(e)}"}
finally: finally:
self.processing = False self.processing = False
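Every Ollama call above folds the same four usage counters into metadata and refreshes the running context-token count. A small helper could centralize that bookkeeping; this is a sketch of the repeated pattern, not a function that exists in the codebase:

    def accumulate_usage(metadata: dict, response: dict) -> int:
        for key in ("eval_count", "eval_duration",
                    "prompt_eval_count", "prompt_eval_duration"):
            metadata[key] += response[key]
        # Callers store the returned value as context["context_tokens"]
        return response["prompt_eval_count"] + response["eval_count"]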
def run(self, host='0.0.0.0', port=WEB_PORT, **kwargs): def run(self, host="0.0.0.0", port=WEB_PORT, **kwargs):
try: try:
uvicorn.run(self.app, host=host, port=port) uvicorn.run(self.app, host=host, port=port)
except KeyboardInterrupt: except KeyboardInterrupt:
@@ -1176,7 +1176,7 @@ def main():
# documents = Rag.load_text_files(defines.doc_dir) # documents = Rag.load_text_files(defines.doc_dir)
# print(f"Documents loaded {len(documents)}") # print(f"Documents loaded {len(documents)}")
# chunks = Rag.create_chunks_from_documents(documents) # chunks = Rag.create_chunks_from_documents(documents)
# doc_types = set(chunk.metadata['doc_type'] for chunk in chunks) # doc_types = set(chunk.metadata["doc_type"] for chunk in chunks)
# print(f"Document types: {doc_types}") # print(f"Document types: {doc_types}")
# print(f"Vectorstore created with {collection.count()} documents") # print(f"Vectorstore created with {collection.count()} documents")