Refactored Resume Builder

This commit is contained in:
James Ketr 2025-04-18 15:58:16 -07:00
parent a6b38bfbbe
commit 6eaad89b1a
10 changed files with 582 additions and 356 deletions

View File

@ -1,5 +1,7 @@
div {
box-sizing: border-box;
overflow-wrap: break-word;
word-break: break-word;
}
.TabPanel {

View File

@ -340,7 +340,7 @@ const App = () => {
const [messageHistoryLength, setMessageHistoryLength] = useState<number>(5);
const [tab, setTab] = useState<number>(0);
const [about, setAbout] = useState<string>("");
const [jobDescription, setJobDescription] = useState<string>("");
const [jobDescription, setJobDescription] = useState<string | undefined>(undefined);
const [resume, setResume] = useState<MessageData | undefined>(undefined);
const [facts, setFacts] = useState<MessageData | undefined>(undefined);
const timerRef = useRef<any>(null);
@ -1263,7 +1263,7 @@ const App = () => {
</CustomTabPanel>
<CustomTabPanel tab={tab} index={1}>
<ResumeBuilder {...{ isScrolledToBottom, scrollToBottom, facts, setFacts, resume, setResume, jobDescription, processing, setProcessing, setSnack, connectionBase: connectionBase, sessionId }} />
<ResumeBuilder {...{ isScrolledToBottom, setJobDescription, scrollToBottom, facts, setFacts, resume, setResume, jobDescription, processing, setProcessing, setSnack, connectionBase: connectionBase, sessionId }} />
</CustomTabPanel>
<CustomTabPanel tab={tab} index={2}>

View File

@ -62,7 +62,7 @@ function ChatBubble({ role, isFullWidth, children, sx }: ChatBubbleProps) {
border: `1px solid ${theme.palette.secondary.main}`, // Dusty Teal
borderRadius: '16px',
padding: theme.spacing(1, 2),
maxWidth: isFullWidth ? '100%' : '95%',
maxWidth: isFullWidth ? '100%' : '100%',
minWidth: '70%',
alignSelf: 'flex-start',
color: theme.palette.text.primary, // Charcoal Black (#2E2E2E) — much better contrast

45
frontend/src/Document.tsx Normal file
View File

@ -0,0 +1,45 @@
import React from 'react';
import { Box, Typography } from '@mui/material';
import { SxProps, Theme } from '@mui/material';
/**
 * Props for the Document component
 * @interface DocumentComponentProps
 * @property {string} title - The title of the document; an empty string hides the title header
 * @property {React.ReactNode} [children] - The content of the document
 * @property {SxProps<Theme>} [sx] - Optional MUI styling merged into the outer container (fixed layout keys take precedence)
 */
interface DocumentComponentProps {
  title: string;
  children?: React.ReactNode;
  sx?: SxProps<Theme>;
}
/**
 * Document renders a container with an optional bold title header and a
 * scrollable content area.
 *
 * Provides a consistent document viewing experience across the application:
 * the outer Box clips overflow while the inner Box scrolls its children.
 * Caller-supplied `sx` is spread first, so the fixed layout keys below
 * always win.
 */
const Document: React.FC<DocumentComponentProps> = ({ title, children, sx }) => {
  // An empty title suppresses the header row entirely.
  const showTitle = title !== "";
  return (
    <Box
      sx={{
        ...sx,
        display: 'flex',
        flexDirection: 'column',
        flexGrow: 1,
        overflow: 'hidden',
      }}
    >
      {showTitle && (
        <Typography sx={{ pl: 1, pr: 1, display: 'flex', mt: -1, fontWeight: 'bold' }}>
          {title}
        </Typography>
      )}
      <Box sx={{ display: 'flex', p: 1, flexGrow: 1, overflow: 'auto' }}>
        {children}
      </Box>
    </Box>
  );
};

export { Document };

View File

@ -0,0 +1,27 @@
import { SxProps, Theme } from '@mui/material';
import { MessageData } from './MessageMeta';
/**
 * Props for the DocumentViewer component
 * @interface DocumentViewerProps
 * @property {function} generateResume - Function to generate a resume based on job description
 * @property {MessageData | undefined} resume - The generated resume data
 * @property {function} setResume - Function to set (or clear, with undefined) the generated resume
 * @property {function} factCheck - Function to fact check the generated resume
 * @property {MessageData | undefined} facts - The fact check results
 * @property {function} setFacts - Function to set (or clear, with undefined) the fact check results
 * @property {string | undefined} jobDescription - The initial job description; undefined when none has been entered
 * @property {function} setJobDescription - Function to set (or clear, with undefined) the job description
 * @property {SxProps<Theme>} [sx] - Optional styling properties
 */
export interface DocumentViewerProps {
  generateResume: (jobDescription: string) => void;
  resume: MessageData | undefined;
  setResume: (resume: MessageData | undefined) => void;
  factCheck: (resume: string) => void;
  facts: MessageData | undefined;
  setFacts: (facts: MessageData | undefined) => void;
  jobDescription: string | undefined;
  setJobDescription: (jobDescription: string | undefined) => void;
  sx?: SxProps<Theme>;
}

View File

@ -12,150 +12,216 @@ import {
Divider,
Slider,
Stack,
TextField
TextField,
Tooltip
} from '@mui/material';
import Tooltip from '@mui/material/Tooltip';
import { useTheme } from '@mui/material/styles';
import SendIcon from '@mui/icons-material/Send';
import {
ChevronLeft,
ChevronRight,
SwapHoriz,
RestartAlt as ResetIcon,
} from '@mui/icons-material';
import { SxProps, Theme } from '@mui/material';
import PropagateLoader from "react-spinners/PropagateLoader";
import { MessageData } from './MessageMeta';
import { Message } from './Message';
import { Document } from './Document';
import { DocumentViewerProps } from './DocumentTypes';
import MuiMarkdown from 'mui-markdown';
interface DocumentComponentProps {
title: string;
children?: React.ReactNode;
}
interface DocumentViewerProps {
generateResume: (jobDescription: string) => void,
factCheck: (resume: string) => void,
resume: MessageData | undefined,
facts: MessageData | undefined,
jobDescription: string,
sx?: SxProps<Theme>,
};
// Document component
const Document: React.FC<DocumentComponentProps> = ({ title, children }) => (
<Box
sx={{
display: 'flex',
flexDirection: 'column',
flexGrow: 1,
overflow: 'hidden',
}}
>
{
title !== "" &&
<Typography sx={{ pl: 1, pr: 1, display: 'flex', mt: -1, fontWeight: 'bold' }}>{title}</Typography>
}
<Box sx={{ display: 'flex', p: 1, flexGrow: 1, overflow: 'auto' }}>
{children}
</Box>
</Box>
);
const DocumentViewer: React.FC<DocumentViewerProps> = ({ generateResume, jobDescription, factCheck, resume, facts, sx }: DocumentViewerProps) => {
const [editJobDescription, setEditJobDescription] = useState<string>(jobDescription);
const [processing, setProcessing] = useState<boolean>(false);
/**
* DocumentViewer component
*
* A responsive component that displays job descriptions, generated resumes and fact checks
* with different layouts for mobile and desktop views.
*/
const DocumentViewer: React.FC<DocumentViewerProps> = ({
generateResume,
jobDescription,
factCheck,
resume,
setResume,
facts,
setFacts,
sx
}) => {
// State for editing job description
const [editJobDescription, setEditJobDescription] = useState<string | undefined>(jobDescription);
// Processing state to show loading indicators
const [processing, setProcessing] = useState<string | undefined>(undefined);
// Theme and responsive design setup
const theme = useTheme();
const isMobile = useMediaQuery(theme.breakpoints.down('md'));
// State for controlling which document is active on mobile
const [activeDocMobile, setActiveDocMobile] = useState<number>(0);
const [activeTab, setActiveTab] = useState<number>(0);
// State for controlling split ratio on desktop
const [splitRatio, setSplitRatio] = useState<number>(50);
const [splitRatio, setSplitRatio] = useState<number>(100);
/**
* Reset processing state when resume is generated
*/
useEffect(() => {
if (processing && resume !== undefined) {
setProcessing(false);
if (resume !== undefined && processing === "resume") {
setProcessing(undefined);
}
}, [processing, resume, setProcessing]);
}, [processing, resume]);
const triggerGeneration = useCallback((jobDescription: string) => {
setProcessing(true);
setActiveDocMobile(1);
/**
* Reset processing state when facts is generated
*/
useEffect(() => {
if (facts !== undefined && processing === "facts") {
setProcessing(undefined);
}
}, [processing, facts]);
/**
* Trigger resume generation and update UI state
*/
const triggerGeneration = useCallback((jobDescription: string | undefined) => {
if (jobDescription === undefined) {
setProcessing(undefined);
setResume(undefined);
setActiveTab(0);
return;
}
setProcessing("resume");
setTimeout(() => { setActiveTab(1); }, 250); // Switch to resume view on mobile
generateResume(jobDescription);
}, [setProcessing, generateResume]);
}, [generateResume, setProcessing, setActiveTab, setResume]);
/**
* Trigger fact check and update UI state
*/
const triggerFactCheck = useCallback((resume: string | undefined) => {
if (resume === undefined) {
setProcessing(undefined);
setResume(undefined);
setFacts(undefined);
setActiveTab(1);
return;
}
setProcessing("facts");
factCheck(resume);
setTimeout(() => { setActiveTab(2); }, 250); // Switch to resume view on mobile
}, [factCheck, setResume, setProcessing, setActiveTab, setFacts]);
/**
* Switch to resume tab when resume become available
*/
useEffect(() => {
if (resume !== undefined) {
setTimeout(() => { setActiveTab(1); }, 250); // Switch to resume view on mobile
}
}, [resume]);
/**
* Switch to fact check tab when facts become available
*/
useEffect(() => {
if (facts !== undefined) {
setActiveDocMobile(2);
setTimeout(() => { setActiveTab(2); }, 250); // Switch to resume view on mobile
}
}, [facts, setActiveDocMobile]);
}, [facts]);
// Handle tab change for mobile
/**
* Handle tab change for mobile view
*/
const handleTabChange = (_event: React.SyntheticEvent, newValue: number): void => {
setActiveDocMobile(newValue);
setActiveTab(newValue);
};
// Adjust split ratio
/**
* Adjust split ratio for desktop view
*/
const handleSliderChange = (_event: Event, newValue: number | number[]): void => {
setSplitRatio(newValue as number);
};
// Reset split ratio
/**
* Reset split ratio to default
*/
const resetSplit = (): void => {
setSplitRatio(50);
};
const handleKeyPress = (event: any) => {
/**
* Handle keyboard shortcuts
*/
const handleKeyPress = (event: React.KeyboardEvent): void => {
if (event.key === 'Enter' && event.ctrlKey) {
triggerGeneration(editJobDescription);
triggerGeneration(editJobDescription || "");
}
};
// Mobile view
if (isMobile) {
return (
<Box sx={{ display: 'flex', flexDirection: 'column', flexGrow: 1, ...sx }}>
{/* Tabs */}
<Tabs
value={activeDocMobile}
onChange={handleTabChange}
variant="fullWidth"
sx={{ bgcolor: 'background.paper' }}
>
<Tab label="Job Description" />
<Tab label="Resume" />
{facts !== undefined && <Tab label="Fact Check" />}
</Tabs>
const renderJobDescriptionView = () => {
const jobDescription = [];
{/* Document display area */}
<Box sx={{ display: 'flex', flexDirection: 'column', flexGrow: 1, overflow: 'hidden', p: 2 }}>
{activeDocMobile === 0 ? (<>
<Document title="">
if (resume === undefined && processing === undefined) {
jobDescription.push(
<Document key="jobDescription" sx={{ display: "flex", flexGrow: 1 }} title="">
<TextField
variant="outlined"
fullWidth
multiline
type="text"
sx={{
flex: 1, // Makes the TextField fill the parent height
flex: 1,
flexGrow: 1,
maxHeight: '100%', // Prevents it from growing larger than the parent height
overflow: 'auto', // Enables scrollbars if the content overflows
maxHeight: '100%',
overflow: 'auto',
}}
value={editJobDescription}
onChange={(e) => setEditJobDescription(e.target.value)}
onKeyDown={handleKeyPress}
// placeholder="Paste a job description (or URL that resolves to one), then click Generate..."
placeholder="Paste a job description, then click Generate..."
/>
</Document>
<Tooltip title="Generate">
<Button sx={{ m: 1, gap: 1 }} variant="contained" onClick={() => { triggerGeneration(editJobDescription); }}>Generate<SendIcon /></Button>
);
} else {
jobDescription.push(<MuiMarkdown key="jobDescription" >{editJobDescription}</MuiMarkdown>)
}
jobDescription.push(
<Box key="jobActions" sx={{ display: "flex", justifyContent: "center", flexDirection: "row" }}>
<IconButton
sx={{ display: "flex", margin: 'auto 0px' }}
size="large"
edge="start"
color="inherit"
disabled={processing !== undefined}
onClick={() => { setEditJobDescription(""); triggerGeneration(undefined); }}
>
<Tooltip title="Reset Job Description">
<ResetIcon />
</Tooltip>
</>) : (activeDocMobile === 1 ? (<Box sx={{ display: "flex", flexDirection: "column", overflow: "auto" }}>
<Document title="">{resume !== undefined && <Message message={resume} />}</Document>
{processing === true && <>
</IconButton>
<Tooltip title="Generate">
<Button
sx={{ m: 1, gap: 1, flexGrow: 1 }}
variant="contained"
onClick={() => { triggerGeneration(editJobDescription); }}
>
Generate<SendIcon />
</Button>
</Tooltip>
</Box>
);
return jobDescription;
}
/**
* Renders the resume view with loading indicator
*/
const renderResumeView = () => (
<Box key="ResumeView" sx={{ display: "flex", flexDirection: "column", overflow: "auto", flexGrow: 1, flexBasis: 0 }}>
<Document sx={{ display: "flex", flexGrow: 1 }} title="">
{resume !== undefined && <Message message={resume} />}
</Document>
{processing === "resume" && (
<Box sx={{
display: "flex",
flexDirection: "column",
@ -166,101 +232,131 @@ const DocumentViewer: React.FC<DocumentViewerProps> = ({ generateResume, jobDesc
}}>
<PropagateLoader
size="10px"
loading={processing}
loading={true}
aria-label="Loading Spinner"
data-testid="loader"
/>
<Typography>Generating resume...</Typography>
</Box>
</>}
<Card sx={{ display: "flex", overflow: "auto", minHeight: "fit-content", p: 1, flexDirection: "column" }}>
{resume !== undefined || processing === true
? <>
<Typography><b>NOTE:</b> As with all LLMs, hallucination is always a possibility. If the generated resume seems too good to be true, <b>Fact Check</b> or, expand the <b>LLM information for this query</b> section (at the end of the resume) and click the links in the <b>Top RAG</b> matches to view the relavent RAG source document to read the details. Or go back to 'Backstory' and ask a question.</Typography> {processing === false && <Tooltip title="Fact Check">
<Button sx={{ m: 1, gap: 1 }} variant="contained" onClick={() => { resume && factCheck(resume.content); }}>Fact Check<SendIcon /></Button>
</Tooltip>}</>
: <Typography>Once you click <b>Generate</b> under the <b>Job Description</b>, a resume will be generated based on the user's RAG content and the job description.</Typography>
}
</Card>
</Box>) :
(<Box sx={{ display: "flex", flexDirection: "column", overflow: "auto" }}>
<Document title="">{facts !== undefined && <Message message={facts} />}</Document>
</Box>))}
</Box>
)}
<ResumeActionCard
resume={resume}
processing={processing}
triggerFactCheck={triggerFactCheck}
/>
</Box>
);
}
// Desktop view
return (
<Box sx={{ display: 'flex', flexDirection: 'column', flexGrow: 1, ...sx }}>
{/* Split document view */}
<Box sx={{ display: 'flex', flexGrow: 1, overflow: 'hidden', p: 2 }}>
<Box sx={{ display: 'flex', flexDirection: 'column', width: `${splitRatio}%`, pr: 1, flexGrow: 1, overflow: 'hidden' }}>
<Document title="Job Description">
<TextField
variant="outlined"
fullWidth
type="text"
multiline
sx={{
flex: 1, // Makes the TextField fill the parent height
flexGrow: 1,
maxHeight: '100%', // Prevents it from growing larger than the parent height
overflow: 'auto', // Enables scrollbars if the content overflows
}}
value={editJobDescription}
onChange={(e) => setEditJobDescription(e.target.value)}
onKeyDown={handleKeyPress}
// placeholder="Paste a job description (or URL that resolves to one), then click Generate..."
placeholder="Paste a job description, then click Generate..."
/>
/**
* Renders the fact check view
*/
const renderFactCheckView = () => (
<Box key="FactView" sx={{ display: "flex", flexDirection: "column", overflow: "auto", flexGrow: 1, flexBasis: 0, p: 0 }}>
<Document sx={{ display: "flex", flexGrow: 1 }} title="">
{facts !== undefined && <Message message={facts} />}
</Document>
<Tooltip title="Generate">
<Button sx={{ m: 1, gap: 1 }} variant="contained" onClick={() => { triggerGeneration(editJobDescription); }}>Generate<SendIcon /></Button>
</Tooltip>
</Box>
<Divider orientation="vertical" flexItem />
<Box sx={{ display: 'flex', width: `${100 - splitRatio}%`, pl: 1, flexGrow: 1, flexDirection: 'column' }}>
<Document title="">{resume !== undefined && <Message message={resume} />}</Document>
{processing === "facts" && (
<Box sx={{
display: "flex",
flexDirection: "column",
alignItems: "center",
justifyContent: "center",
mb: 1
mb: 1,
height: "10px"
}}>
<PropagateLoader
size="10px"
loading={processing}
loading={true}
aria-label="Loading Spinner"
data-testid="loader"
/>
<Typography>Fact Checking resume...</Typography>
</Box>
<Card sx={{ display: "flex", overflow: "auto", minHeight: "fit-content", p: 1, flexDirection: "column" }}>
{resume !== undefined || processing === true
? <>
<Typography><b>NOTE:</b> As with all LLMs, hallucination is always a possibility. If the generated resume seems too good to be true, <b>Fact Check</b> or, expand the <b>LLM information for this query</b> section (at the end of the resume) and click the links in the <b>Top RAG</b> matches to view the relavent RAG source document to read the details. Or go back to 'Backstory' and ask a question.</Typography> { processing === false && <Tooltip title="Fact Check">
<Button sx={{ m: 1, gap: 1 }} variant="contained" onClick={() => { resume && factCheck(resume.content); }}>Fact Check<SendIcon /></Button>
</Tooltip>}</>
: <Typography>Once you click <b>Generate</b> under the <b>Job Description</b>, a resume will be generated based on the user's RAG content and the job description.</Typography>
)}
</Box>
);
// Render mobile view
if (isMobile) {
/**
* Gets the appropriate content based on active tab
*/
const getActiveMobileContent = () => {
switch (activeTab) {
case 0:
return renderJobDescriptionView();
case 1:
return renderResumeView();
case 2:
return renderFactCheckView();
default:
return renderJobDescriptionView();
}
</Card>
};
return (
<Box sx={{ display: 'flex', flexDirection: 'column', flexGrow: 1, ...sx }}>
{/* Tabs */}
<Tabs
value={activeTab}
onChange={handleTabChange}
variant="fullWidth"
sx={{ bgcolor: 'background.paper' }}
>
<Tab label="Job Description" />
{(resume !== undefined || processing === "resume") && <Tab label="Resume" />}
{(facts !== undefined || processing === "facts") && <Tab label="Fact Check" />}
</Tabs>
{/* Document display area */}
<Box sx={{ display: 'flex', flexDirection: 'column', flexGrow: 1, overflow: 'hidden', p: 0 }}>
{getActiveMobileContent()}
</Box>
{
facts !== undefined && <>
<Box sx={{ display: 'flex', width: `${100 - splitRatio}%`, pl: 1, flexGrow: 1, flexDirection: 'column' }}>
</Box>
);
}
/**
* Gets the appropriate content based on active state for Desktop
*/
const getActiveDesktopContent = () => {
/* Left panel - Job Description */
const showResume = resume !== undefined || processing === "resume"
const showFactCheck = facts !== undefined || processing === "facts"
const otherRatio = showResume ? (100 - splitRatio / 2) : 100;
const children = [];
children.push(
<Box key="JobDescription" sx={{ display: 'flex', flexDirection: 'column', width: `${otherRatio}%`, p: 0, flexGrow: 1, overflow: 'hidden' }}>
{renderJobDescriptionView()}
</Box>);
/* Resume panel - conditionally rendered if resume defined, or processing is in progress */
if (showResume) {
children.push(
<Box key="ResumeView" sx={{ display: 'flex', width: '100%', p: 0, flexGrow: 1, flexDirection: 'row' }}>
<Divider orientation="vertical" flexItem />
<Document title=""><Message message={facts} /></Document>
{renderResumeView()}
</Box>
</>}
);
}
/* Fact Check panel - conditionally rendered if facts defined, or processing is in progress */
if (showFactCheck) {
children.push(
<Box key="FactCheckView" sx={{ display: 'flex', width: `${otherRatio}%`, p: 0, flexGrow: 1, flexDirection: 'row' }}>
<Divider orientation="vertical" flexItem />
{renderFactCheckView()}
</Box>
);
}
{/* Split control panel */}
<Paper sx={{ p: 2, display: 'flex', alignItems: 'center', justifyContent: 'center' }}>
/* Split control panel - conditionally rendered if either facts or resume is set */
let slider = <Box key="slider"></Box>;
if (showResume || showFactCheck) {
slider = (
<Paper key="slider" sx={{ p: 2, display: 'flex', alignItems: 'center', justifyContent: 'center' }}>
<Stack direction="row" spacing={2} alignItems="center" sx={{ width: '60%' }}>
<IconButton onClick={() => setSplitRatio(Math.max(20, splitRatio - 10))}>
<IconButton onClick={() => setSplitRatio(Math.max(0, splitRatio - 10))}>
<ChevronLeft />
</IconButton>
@ -268,11 +364,11 @@ const DocumentViewer: React.FC<DocumentViewerProps> = ({ generateResume, jobDesc
value={splitRatio}
onChange={handleSliderChange}
aria-label="Split ratio"
min={20}
max={80}
min={0}
max={100}
/>
<IconButton onClick={() => setSplitRatio(Math.min(80, splitRatio + 10))}>
<IconButton onClick={() => setSplitRatio(Math.min(100, splitRatio + 10))}>
<ChevronRight />
</IconButton>
@ -281,12 +377,81 @@ const DocumentViewer: React.FC<DocumentViewerProps> = ({ generateResume, jobDesc
</IconButton>
</Stack>
</Paper>
);
}
return (
<Box sx={{ display: 'flex', flexGrow: 1, flexDirection: 'column', overflow: 'hidden', p: 0 }}>
<Box sx={{ display: 'flex', flexGrow: 1, flexDirection: 'row', overflow: 'hidden', p: 0 }}>
{children}
</Box>
{slider}
</Box>
)
}
return (
<Box sx={{ display: 'flex', flexDirection: 'column', flexGrow: 1, ...sx }}>
{getActiveDesktopContent()}
</Box>
);
};
export type {
DocumentViewerProps
};
export { DocumentViewer };
/**
* Props for the ResumeActionCard component
*/
interface ResumeActionCardProps {
resume: any;
processing: string | undefined;
triggerFactCheck: (resume: string | undefined) => void;
}
/**
* Action card displayed underneath the resume with notes and fact check button
*/
const ResumeActionCard: React.FC<ResumeActionCardProps> = ({ resume, processing, triggerFactCheck }) => (
<Box sx={{ display: "flex", justifyContent: "center", flexDirection: "column" }}>
<Card sx={{ display: "flex", overflow: "auto", minHeight: "fit-content", p: 1, flexDirection: "column" }}>
{resume !== undefined || processing === "resume" ? (
<Typography>
<b>NOTE:</b> As with all LLMs, hallucination is always a possibility. If the generated resume seems too good to be true, <b>Fact Check</b> or, expand the <b>LLM information for this query</b> section (at the end of the resume) and click the links in the <b>Top RAG</b> matches to view the relavent RAG source document to read the details. Or go back to 'Backstory' and ask a question.
</Typography>
) : (
<Typography>
Once you click <b>Generate</b> under the <b>Job Description</b>, a resume will be generated based on the user's RAG content and the job description.
</Typography>
)}
</Card>
<Box sx={{ display: "flex", justifyContent: "center", flexDirection: "row", flexGrow: 1 }}>
<IconButton
sx={{ display: "flex", margin: 'auto 0px' }}
size="large"
edge="start"
color="inherit"
disabled={processing === "resume"}
onClick={() => { triggerFactCheck(undefined); }}
>
<Tooltip title="Reset Resume">
<ResetIcon />
</Tooltip>
</IconButton>
<Tooltip title="Fact Check">
<span style={{ display: "flex", flexGrow: 1 }}>
<Button
sx={{ m: 1, gap: 1, flexGrow: 1 }}
variant="contained"
disabled={processing === "facts"}
onClick={() => { resume && triggerFactCheck(resume.content); }}
>
Fact Check<SendIcon />
</Button>
</span>
</Tooltip>
</Box>
</Box>
);
export {
DocumentViewer
};

View File

@ -50,7 +50,7 @@ const Message = ({ message, submitQuery, isFullWidth }: MessageInterface) => {
const formattedContent = message.content.trim();
return (
<ChatBubble isFullWidth={isFullWidth} role={message.role} sx={{ flexGrow: 1, pb: message.metadata ? 0 : "8px", mb: 1, mt: 1 }}>
<ChatBubble isFullWidth={isFullWidth} role={message.role} sx={{ flexGrow: 1, pb: message.metadata ? 0 : "8px", m: 0, mb: 1, mt: 1 }}>
<CardContent>
{message.role !== 'user' ?
<StyledMarkdown {...{ content: formattedContent, submitQuery }} />

View File

@ -1,4 +1,4 @@
import { useState, useCallback, useEffect, } from 'react';
import { useState, useCallback, } from 'react';
import Box from '@mui/material/Box';
import { SeverityType } from './Snack';
import { ContextStatus } from './ContextStatus';
@ -17,10 +17,11 @@ interface ResumeBuilderProps {
setResume: (resume: MessageData | undefined) => void,
facts: MessageData | undefined,
setFacts: (facts: MessageData | undefined) => void,
jobDescription: string,
jobDescription: string | undefined,
setJobDescription: (jobDescription: string | undefined) => void
};
const ResumeBuilder = ({ jobDescription, facts, setFacts, resume, setResume, setProcessing, processing, connectionBase, sessionId, setSnack }: ResumeBuilderProps) => {
const ResumeBuilder = ({ jobDescription, setJobDescription, facts, setFacts, resume, setResume, setProcessing, processing, connectionBase, sessionId, setSnack }: ResumeBuilderProps) => {
const [lastEvalTPS, setLastEvalTPS] = useState<number>(35);
const [lastPromptTPS, setLastPromptTPS] = useState<number>(430);
const [contextStatus, setContextStatus] = useState<ContextStatus>({ context_used: 0, max_context: 0 });
@ -191,7 +192,6 @@ const ResumeBuilder = ({ jobDescription, facts, setFacts, resume, setResume, set
const factCheck = async (resume: string) => {
if (!resume.trim()) return;
setFacts(undefined);
setSnack('Fact Check is still under development', 'warning');
try {
setProcessing(true);
@ -294,20 +294,21 @@ const ResumeBuilder = ({ jobDescription, facts, setFacts, resume, setResume, set
return (
<Box className="DocBox">
<Box className="Conversation">
<Box className="Conversation" sx={{ p: 0, pt: 1 }}>
<DocumentViewer sx={{
p: 0,
m: 0,
display: "flex",
flexGrow: 1,
overflowY: "auto",
flexDirection: "column",
height: "calc(0vh - 0px)", // Hack to make the height work
}} {...{ factCheck, facts, jobDescription, generateResume, resume }} />
}} {...{ factCheck, facts, jobDescription, generateResume, resume, setFacts, setResume, setJobDescription }} />
</Box>
</Box>
);
}
export type {
ResumeBuilderProps
};

View File

@ -257,20 +257,6 @@ const VectorVisualizer: React.FC<VectorVisualizerProps> = ({ setSnack, connectio
</Card>
<Box sx={{ display: 'flex', flexGrow: 1, justifyContent: 'center', alignItems: 'center' }}>
<Plot
onHover={(event: any) => {
const point = event.points[0];
console.log('Point:', point);
const type = point.customdata.type;
const text = point.customdata.doc;
const emoji = emojiMap[type] || '❓';
setTooltip({
visible: true,
background: point['marker.color'],
color: getTextColorForBackground(point['marker.color']),
content: `${emoji} ${type.toUpperCase()}\n${text}`,
});
}}
onClick={(event: any) => {
const point = event.points[0];
console.log('Point:', point);

View File

@ -20,14 +20,14 @@ def try_import(module_name, pip_name=None):
print(f" pip install {pip_name or module_name}")
# Third-party modules with import checks
try_import('ollama')
try_import('requests')
try_import('bs4', 'beautifulsoup4')
try_import('fastapi')
try_import('uvicorn')
try_import('numpy')
try_import('umap')
try_import('sklearn')
try_import("ollama")
try_import("requests")
try_import("bs4", "beautifulsoup4")
try_import("fastapi")
try_import("uvicorn")
try_import("numpy")
try_import("umap")
try_import("sklearn")
import ollama
import requests
@ -59,9 +59,9 @@ rags = [
def get_installed_ram():
try:
with open('/proc/meminfo', 'r') as f:
with open("/proc/meminfo", "r") as f:
meminfo = f.read()
match = re.search(r'MemTotal:\s+(\d+)', meminfo)
match = re.search(r"MemTotal:\s+(\d+)", meminfo)
if match:
return f"{math.floor(int(match.group(1)) / 1000**2)}GB" # Convert KB to GB
except Exception as e:
@ -71,12 +71,12 @@ def get_graphics_cards():
gpus = []
try:
# Run the ze-monitor utility
result = subprocess.run(['ze-monitor'], capture_output=True, text=True, check=True)
result = subprocess.run(["ze-monitor"], capture_output=True, text=True, check=True)
# Clean up the output (remove leading/trailing whitespace and newlines)
output = result.stdout.strip()
for index in range(len(output.splitlines())):
result = subprocess.run(['ze-monitor', '--device', f'{index+1}', '--info'], capture_output=True, text=True, check=True)
result = subprocess.run(["ze-monitor", "--device", f"{index+1}", "--info"], capture_output=True, text=True, check=True)
gpu_info = result.stdout.strip().splitlines()
gpu = {
"discrete": True, # Assume it's discrete initially
@ -85,17 +85,17 @@ def get_graphics_cards():
}
gpus.append(gpu)
for line in gpu_info:
match = re.match(r'^Device: [^(]*\((.*)\)', line)
match = re.match(r"^Device: [^(]*\((.*)\)", line)
if match:
gpu["name"] = match.group(1)
continue
match = re.match(r'^\s*Memory: (.*)', line)
match = re.match(r"^\s*Memory: (.*)", line)
if match:
gpu["memory"] = match.group(1)
continue
match = re.match(r'^.*Is integrated with host: Yes.*', line)
match = re.match(r"^.*Is integrated with host: Yes.*", line)
if match:
gpu["discrete"] = False
continue
@ -106,10 +106,10 @@ def get_graphics_cards():
def get_cpu_info():
try:
with open('/proc/cpuinfo', 'r') as f:
with open("/proc/cpuinfo", "r") as f:
cpuinfo = f.read()
model_match = re.search(r'model name\s+:\s+(.+)', cpuinfo)
cores_match = re.findall(r'processor\s+:\s+\d+', cpuinfo)
model_match = re.search(r"model name\s+:\s+(.+)", cpuinfo)
cores_match = re.findall(r"processor\s+:\s+\d+", cpuinfo)
if model_match and cores_match:
return f"{model_match.group(1)} with {len(cores_match)} cores"
except Exception as e:
@ -200,8 +200,8 @@ def parse_args():
parser.add_argument("--ollama-model", type=str, default=MODEL_NAME, help=f"LLM model to use. default={MODEL_NAME}")
parser.add_argument("--web-host", type=str, default=WEB_HOST, help=f"Host to launch Flask web server. default={WEB_HOST} only if --web-disable not specified.")
parser.add_argument("--web-port", type=str, default=WEB_PORT, help=f"Port to launch Flask web server. default={WEB_PORT} only if --web-disable not specified.")
parser.add_argument('--level', type=str, choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default=LOG_LEVEL, help=f'Set the logging level. default={LOG_LEVEL}')
parser.add_argument("--level", type=str, choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
default=LOG_LEVEL, help=f"Set the logging level. default={LOG_LEVEL}")
return parser.parse_args()
def setup_logging(level):
@ -209,7 +209,7 @@ def setup_logging(level):
if not isinstance(numeric_level, int):
raise ValueError(f"Invalid log level: {level}")
logging.basicConfig(level=numeric_level, format='%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s')
logging.basicConfig(level=numeric_level, format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s")
logging.info(f"Logging is set to {level} level.")
@ -230,26 +230,26 @@ async def AnalyzeSite(url, question):
try:
# Fetch the webpage
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}
logging.info(f"Fetching {url}")
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()
logging.info(f"{url} returned. Processing...")
# Parse the HTML
soup = BeautifulSoup(response.text, 'html.parser')
soup = BeautifulSoup(response.text, "html.parser")
# Remove script and style elements
for script in soup(["script", "style"]):
script.extract()
# Get text content
text = soup.get_text(separator=' ', strip=True)
text = soup.get_text(separator=" ", strip=True)
# Clean up text (remove extra whitespace)
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
text = ' '.join(chunk for chunk in chunks if chunk)
text = " ".join(chunk for chunk in chunks if chunk)
# Limit text length if needed (Ollama may have token limits)
max_chars = 100000
@ -265,12 +265,12 @@ async def AnalyzeSite(url, question):
system="You are given the contents of {url}. Answer the question about the contents",
prompt=prompt)
#logging.info(response['response'])
#logging.info(response["response"])
return {
'source': 'summarizer-llm',
'content': response['response'],
'metadata': DateTime()
"source": "summarizer-llm",
"content": response["response"],
"metadata": DateTime()
}
except requests.exceptions.RequestException as e:
@ -306,40 +306,40 @@ async def handle_tool_calls(message):
tools_used = []
all_responses = []
for i, tool_call in enumerate(message['tool_calls']):
arguments = tool_call['function']['arguments']
tool = tool_call['function']['name']
for i, tool_call in enumerate(message["tool_calls"]):
arguments = tool_call["function"]["arguments"]
tool = tool_call["function"]["name"]
# Yield status update before processing each tool
yield {"status": "processing", "message": f"Processing tool {i+1}/{len(message['tool_calls'])}: {tool}..."}
yield {"status": "processing", "message": f"Processing tool {i+1}/{len(message['tool_calls'])}: {tool}..."}
# Process the tool based on its type
match tool:
case 'TickerValue':
ticker = arguments.get('ticker')
case "TickerValue":
ticker = arguments.get("ticker")
if not ticker:
ret = None
else:
ret = TickerValue(ticker)
tools_used.append({ "tool": f"{tool}({ticker})", "result": ret})
case 'AnalyzeSite':
url = arguments.get('url')
question = arguments.get('question', 'what is the summary of this content?')
case "AnalyzeSite":
url = arguments.get("url")
question = arguments.get("question", "what is the summary of this content?")
# Additional status update for long-running operations
yield {"status": "processing", "message": f"Retrieving and summarizing content from {url}..."}
ret = await AnalyzeSite(url, question)
tools_used.append({ "tool": f"{tool}('{url}', '{question}')", "result": ret })
case 'DateTime':
tz = arguments.get('timezone')
case "DateTime":
tz = arguments.get("timezone")
ret = DateTime(tz)
tools_used.append({ "tool": f"{tool}('{tz}')", "result": ret })
case 'WeatherForecast':
city = arguments.get('city')
state = arguments.get('state')
case "WeatherForecast":
city = arguments.get("city")
state = arguments.get("state")
yield {"status": "processing", "message": f"Fetching weather data for {city}, {state}..."}
ret = WeatherForecast(city, state)
@ -352,7 +352,7 @@ async def handle_tool_calls(message):
tool_response = {
"role": "tool",
"content": str(ret),
"name": tool_call['function']['name']
"name": tool_call["function"]["name"]
}
all_responses.append(tool_response)
@ -401,7 +401,7 @@ class WebServer:
self.setup_routes()
def setup_routes(self):
@self.app.get('/')
@self.app.get("/")
async def root():
context = self.create_context()
self.logging.info(f"Redirecting non-session to {context['id']}")
@ -474,7 +474,7 @@ class WebServer:
# "document_count": file_watcher.collection.count()
# }
@self.app.put('/api/umap/{context_id}')
@self.app.put("/api/umap/{context_id}")
async def put_umap(context_id: str, request: Request):
if not self.file_watcher:
return
@ -487,24 +487,24 @@ class WebServer:
try:
data = await request.json()
dimensions = data.get('dimensions', 2)
dimensions = data.get("dimensions", 2)
except:
dimensions = 2
try:
result = self.file_watcher.collection.get(include=['embeddings', 'documents', 'metadatas'])
vectors = np.array(result['embeddings'])
result = self.file_watcher.collection.get(include=["embeddings", "documents", "metadatas"])
vectors = np.array(result["embeddings"])
umap_model = umap.UMAP(n_components=dimensions, random_state=42) #, n_neighbors=15, min_dist=0.1)
embedding = umap_model.fit_transform(vectors)
context['umap_model'] = umap_model
result['embeddings'] = embedding.tolist()
context["umap_model"] = umap_model
result["embeddings"] = embedding.tolist()
return JSONResponse(result)
except Exception as e:
logging.error(e)
return JSONResponse({"error": str(e)}, 500)
@self.app.put('/api/similarity/{context_id}')
@self.app.put("/api/similarity/{context_id}")
async def put_similarity(context_id: str, request: Request):
if not self.file_watcher:
return
@ -519,9 +519,9 @@ class WebServer:
try:
data = await request.json()
query = data.get('query', '')
query = data.get("query", "")
except:
query = ''
query = ""
if not query:
return JSONResponse({"error": "No query provided"}, status_code=400)
@ -537,7 +537,7 @@ class WebServer:
logging.error(e)
#return JSONResponse({"error": str(e)}, 500)
@self.app.put('/api/reset/{context_id}')
@self.app.put("/api/reset/{context_id}")
async def put_reset(context_id: str, request: Request):
if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}")
@ -576,7 +576,7 @@ class WebServer:
except:
return JSONResponse({ "error": "Usage: { reset: rags|tools|history|system-prompt}"})
@self.app.put('/api/tunables/{context_id}')
@self.app.put("/api/tunables/{context_id}")
async def put_tunables(context_id: str, request: Request):
if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}")
@ -600,7 +600,7 @@ class WebServer:
case _:
return JSONResponse({ "error": f"Unrecognized tunable {k}"}, 404)
@self.app.get('/api/tunables/{context_id}')
@self.app.get("/api/tunables/{context_id}")
async def get_tunables(context_id: str):
if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}")
@ -611,7 +611,7 @@ class WebServer:
"message-history-length": context["message_history_length"]
})
@self.app.get('/api/resume/{context_id}')
@self.app.get("/api/resume/{context_id}")
async def get_resume(context_id: str):
if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}")
@ -619,11 +619,11 @@ class WebServer:
context = self.upsert_context(context_id)
return JSONResponse(context["resume_history"])
@self.app.get('/api/system-info/{context_id}')
@self.app.get("/api/system-info/{context_id}")
async def get_system_info(context_id: str):
return JSONResponse(system_info(self.model))
@self.app.post('/api/chat/{context_id}')
@self.app.post("/api/chat/{context_id}")
async def chat_endpoint(context_id: str, request: Request):
if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}")
@ -633,7 +633,7 @@ class WebServer:
# Create a custom generator that ensures flushing
async def flush_generator():
async for message in self.chat(context=context, content=data['content']):
async for message in self.chat(context=context, content=data["content"]):
# Convert to JSON and add newline
yield json.dumps(message) + "\n"
# Save the history as its generated
@ -652,7 +652,7 @@ class WebServer:
}
)
@self.app.post('/api/generate-resume/{context_id}')
@self.app.post("/api/generate-resume/{context_id}")
async def post_generate_resume(context_id: str, request: Request):
if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}")
@ -662,7 +662,7 @@ class WebServer:
# Create a custom generator that ensures flushing
async def flush_generator():
async for message in self.generate_resume(context=context, content=data['content']):
async for message in self.generate_resume(context=context, content=data["content"]):
# Convert to JSON and add newline
yield json.dumps(message) + "\n"
# Save the history as its generated
@ -681,7 +681,7 @@ class WebServer:
}
)
@self.app.post('/api/fact-check/{context_id}')
@self.app.post("/api/fact-check/{context_id}")
async def post_fact_check(context_id: str, request: Request):
if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}")
@ -691,7 +691,7 @@ class WebServer:
# Create a custom generator that ensures flushing
async def flush_generator():
async for message in self.fact_check(context=context, content=data['content']):
async for message in self.fact_check(context=context, content=data["content"]):
# Convert to JSON and add newline
yield json.dumps(message) + "\n"
# Save the history as its generated
@ -706,27 +706,27 @@ class WebServer:
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no" # Prevents Nginx buffering if you're using it
"X-Accel-Buffering": "no" # Prevents Nginx buffering if you're using it
}
)
@self.app.post('/api/context')
@self.app.post("/api/context")
async def create_context():
context = self.create_context()
self.logging.info(f"Generated new session as {context['id']}")
return JSONResponse(context)
@self.app.get('/api/history/{context_id}')
@self.app.get("/api/history/{context_id}")
async def get_history(context_id: str):
context = self.upsert_context(context_id)
return JSONResponse(context["user_history"])
@self.app.get('/api/tools/{context_id}')
@self.app.get("/api/tools/{context_id}")
async def get_tools(context_id: str):
context = self.upsert_context(context_id)
return JSONResponse(context["tools"])
@self.app.put('/api/tools/{context_id}')
@self.app.put("/api/tools/{context_id}")
async def put_tools(context_id: str, request: Request):
if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}")
@ -745,12 +745,12 @@ class WebServer:
except:
return JSONResponse({ "status": "error" }), 405
@self.app.get('/api/rags/{context_id}')
@self.app.get("/api/rags/{context_id}")
async def get_rags(context_id: str):
context = self.upsert_context(context_id)
return JSONResponse(context["rags"])
@self.app.put('/api/rags/{context_id}')
@self.app.put("/api/rags/{context_id}")
async def put_rags(context_id: str, request: Request):
if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}")
@ -769,7 +769,7 @@ class WebServer:
except:
return JSONResponse({ "status": "error" }), 405
@self.app.get('/api/context-status/{context_id}')
@self.app.get("/api/context-status/{context_id}")
async def get_context_status(context_id):
if not is_valid_uuid(context_id):
logging.warning(f"Invalid context_id: {context_id}")
@ -777,18 +777,18 @@ class WebServer:
context = self.upsert_context(context_id)
return JSONResponse({"context_used": context["context_tokens"], "max_context": defines.max_context})
@self.app.get('/api/health')
@self.app.get("/api/health")
async def health_check():
return JSONResponse({"status": "healthy"})
@self.app.get('/{path:path}')
@self.app.get("/{path:path}")
async def serve_static(path: str):
full_path = os.path.join(defines.static_content, path)
if os.path.exists(full_path) and os.path.isfile(full_path):
self.logging.info(f"Serve static request for {full_path}")
return FileResponse(full_path)
self.logging.info(f"Serve index.html for {path}")
return FileResponse(os.path.join(defines.static_content, 'index.html'))
return FileResponse(os.path.join(defines.static_content, "index.html"))
def save_context(self, session_id):
"""
@ -814,7 +814,7 @@ class WebServer:
if umap_model:
del context["umap_model"]
# Serialize the data to JSON and write to file
with open(file_path, 'w') as f:
with open(file_path, "w") as f:
json.dump(context, f)
if umap_model:
context["umap_model"] = umap_model
@ -837,7 +837,7 @@ class WebServer:
return self.create_context(session_id)
# Read and deserialize the data
with open(file_path, 'r') as f:
with open(file_path, "r") as f:
self.contexts[session_id] = json.load(f)
return self.contexts[session_id]
@ -934,21 +934,21 @@ class WebServer:
yield {"status": "processing", "message": "Processing request...", "num_ctx": ctx_size}
# Use the async generator in an async for loop
response = self.client.chat(model=self.model, messages=messages, tools=llm_tools(context["tools"]), options={ 'num_ctx': ctx_size })
metadata["eval_count"] += response['eval_count']
metadata["eval_duration"] += response['eval_duration']
metadata["prompt_eval_count"] += response['prompt_eval_count']
metadata["prompt_eval_duration"] += response['prompt_eval_duration']
context["context_tokens"] = response['prompt_eval_count'] + response['eval_count']
response = self.client.chat(model=self.model, messages=messages, tools=llm_tools(context["tools"]), options={ "num_ctx": ctx_size })
metadata["eval_count"] += response["eval_count"]
metadata["eval_duration"] += response["eval_duration"]
metadata["prompt_eval_count"] += response["prompt_eval_count"]
metadata["prompt_eval_duration"] += response["prompt_eval_duration"]
context["context_tokens"] = response["prompt_eval_count"] + response["eval_count"]
tools_used = []
yield {"status": "processing", "message": "Initial response received..."}
if 'tool_calls' in response.get('message', {}):
if "tool_calls" in response.get("message", {}):
yield {"status": "processing", "message": "Processing tool calls..."}
message = response['message']
message = response["message"]
tool_result = None
# Process all yielded items from the handler
@ -961,14 +961,14 @@ class WebServer:
yield item
message_dict = {
'role': message.get('role', 'assistant'),
'content': message.get('content', '')
"role": message.get("role", "assistant"),
"content": message.get("content", "")
}
if 'tool_calls' in message:
message_dict['tool_calls'] = [
{'function': {'name': tc['function']['name'], 'arguments': tc['function']['arguments']}}
for tc in message['tool_calls']
if "tool_calls" in message:
message_dict["tool_calls"] = [
{"function": {"name": tc["function"]["name"], "arguments": tc["function"]["arguments"]}}
for tc in message["tool_calls"]
]
pre_add_index = len(messages)
@ -985,14 +985,14 @@ class WebServer:
ctx_size = self.get_optimal_ctx_size(context["context_tokens"], messages=messages[pre_add_index:])
yield {"status": "processing", "message": "Generating final response...", "num_ctx": ctx_size }
# Decrease creativity when processing tool call requests
response = self.client.chat(model=self.model, messages=messages, stream=False, options={ 'num_ctx': ctx_size }) #, "temperature": 0.5 })
metadata["eval_count"] += response['eval_count']
metadata["eval_duration"] += response['eval_duration']
metadata["prompt_eval_count"] += response['prompt_eval_count']
metadata["prompt_eval_duration"] += response['prompt_eval_duration']
context["context_tokens"] = response['prompt_eval_count'] + response['eval_count']
response = self.client.chat(model=self.model, messages=messages, stream=False, options={ "num_ctx": ctx_size }) #, "temperature": 0.5 })
metadata["eval_count"] += response["eval_count"]
metadata["eval_duration"] += response["eval_duration"]
metadata["prompt_eval_count"] += response["prompt_eval_count"]
metadata["prompt_eval_duration"] += response["prompt_eval_duration"]
context["context_tokens"] = response["prompt_eval_count"] + response["eval_count"]
reply = response['message']['content']
reply = response["message"]["content"]
final_message = {"role": "assistant", "content": reply }
# history is provided to the LLM and should not have additional metadata
@ -1006,7 +1006,7 @@ class WebServer:
yield {"status": "done", "message": final_message }
except Exception as e:
logging.exception({ 'model': self.model, 'messages': messages, 'error': str(e) })
logging.exception({ "model": self.model, "messages": messages, "error": str(e) })
yield {"status": "error", "message": f"An error occurred: {str(e)}"}
finally:
@ -1032,7 +1032,7 @@ class WebServer:
"resume": "",
"metadata": {},
"rag": "",
"fact_check": ""
"fact_check": {}
}
metadata = {
@ -1044,7 +1044,7 @@ class WebServer:
"prompt_eval_duration": 0,
}
rag_docs = []
resume_doc = open(defines.resume_doc, 'r').read()
resume_doc = open(defines.resume_doc, "r").read()
rag_docs.append(resume_doc)
for rag in context["rags"]:
if rag["enabled"] and rag["name"] == "JPK": # Only support JPK rag right now...
@ -1076,24 +1076,24 @@ class WebServer:
# 2. If not requested (no tool call,) abort the path
# 3. Otherwise, we know the URL was good and can use that URLs fetched content as context.
#
response = self.client.generate(model=self.model, system=system_generate_resume, prompt=content, options={ 'num_ctx': ctx_size })
metadata["eval_count"] += response['eval_count']
metadata["eval_duration"] += response['eval_duration']
metadata["prompt_eval_count"] += response['prompt_eval_count']
metadata["prompt_eval_duration"] += response['prompt_eval_duration']
context["context_tokens"] = response['prompt_eval_count'] + response['eval_count']
response = self.client.generate(model=self.model, system=system_generate_resume, prompt=content, options={ "num_ctx": ctx_size })
metadata["eval_count"] += response["eval_count"]
metadata["eval_duration"] += response["eval_duration"]
metadata["prompt_eval_count"] += response["prompt_eval_count"]
metadata["prompt_eval_duration"] += response["prompt_eval_duration"]
context["context_tokens"] = response["prompt_eval_count"] + response["eval_count"]
reply = response['response']
reply = response["response"]
final_message = {"role": "assistant", "content": reply, "metadata": metadata }
resume['resume'] = final_message
resume["resume"] = final_message
resume_history.append(resume)
# Return the REST API with metadata
yield {"status": "done", "message": final_message }
except Exception as e:
logging.exception({ 'model': self.model, 'content': content, 'error': str(e) })
logging.exception({ "model": self.model, "content": content, "error": str(e) })
yield {"status": "error", "message": f"An error occurred: {str(e)}"}
finally:
@ -1128,29 +1128,29 @@ class WebServer:
# Estimate token length of new messages
ctx_size = self.get_optimal_ctx_size(context["context_tokens"], messages=[system_fact_check, content])
yield {"status": "processing", "message": "Processing request...", "num_ctx": ctx_size}
response = self.client.generate(model=self.model, system=system_fact_check, prompt=content, options={ 'num_ctx': ctx_size })
response = self.client.generate(model=self.model, system=system_fact_check, prompt=content, options={ "num_ctx": ctx_size })
logging.info(f"Fact checking {ctx_size} tokens.")
metadata["eval_count"] += response['eval_count']
metadata["eval_duration"] += response['eval_duration']
metadata["prompt_eval_count"] += response['prompt_eval_count']
metadata["prompt_eval_duration"] += response['prompt_eval_duration']
context["context_tokens"] = response['prompt_eval_count'] + response['eval_count']
reply = response['response']
metadata["eval_count"] += response["eval_count"]
metadata["eval_duration"] += response["eval_duration"]
metadata["prompt_eval_count"] += response["prompt_eval_count"]
metadata["prompt_eval_duration"] += response["prompt_eval_duration"]
context["context_tokens"] = response["prompt_eval_count"] + response["eval_count"]
reply = response["response"]
final_message = {"role": "assistant", "content": reply, "metadata": metadata }
resume['fact_check'] = final_message
resume["fact_check"] = final_message
# Return the REST API with metadata
yield {"status": "done", "message": final_message }
except Exception as e:
logging.exception({ 'model': self.model, 'content': content, 'error': str(e) })
logging.exception({ "model": self.model, "content": content, "error": str(e) })
yield {"status": "error", "message": f"An error occurred: {str(e)}"}
finally:
self.processing = False
def run(self, host='0.0.0.0', port=WEB_PORT, **kwargs):
def run(self, host="0.0.0.0", port=WEB_PORT, **kwargs):
try:
uvicorn.run(self.app, host=host, port=port)
except KeyboardInterrupt:
@ -1176,7 +1176,7 @@ def main():
# documents = Rag.load_text_files(defines.doc_dir)
# print(f"Documents loaded {len(documents)}")
# chunks = Rag.create_chunks_from_documents(documents)
# doc_types = set(chunk.metadata['doc_type'] for chunk in chunks)
# doc_types = set(chunk.metadata["doc_type"] for chunk in chunks)
# print(f"Document types: {doc_types}")
# print(f"Vectorstore created with {collection.count()} documents")