-
- You can change the information available to the LLM by adjusting the following settings:
-
-
-
- }>
- System Prompt
-
-
- setEditSystemPrompt(e.target.value)}
- onKeyDown={handleKeyPress}
- placeholder="Enter the new system prompt.."
- id="SystemPromptInput"
- />
-
-
-
-
-
-
-
- }>
- Tunables
-
-
- setMessageHistoryLength(e.target.value)}
- slotProps={{
- htmlInput: {
- min: 0
- },
- inputLabel: {
- shrink: true,
- },
- }}
- />
-
-
-
- }>
- Tools
-
-
- These tools can be made available to the LLM for obtaining real-time information from the Internet. Each tool's description is the text provided to the LLM, shown here for reference.
-
-
-
- {
- tools.map((tool, index) =>
-
-
- } onChange={() => toggle("tool", index)} label={tool?.function?.name} />
- {tool?.function?.description}
-
- )
- }
-
-
-
- }>
- RAG
-
-
- These RAG databases can be enabled or disabled to supply additional context based on the chat request.
-
-
-
- {
- rags.map((rag, index) =>
-
-
- } onChange={() => toggle("rag", index)} label={rag?.name} />
- {rag?.description}
-
- )
- }
-
-
-
- }>
- System Information
-
-
- The server is running on the following hardware:
-
-
-
-
-
- } onClick={() => { reset(["history"], "History cleared."); }}>Clear Backstory History
-
-
+
+ You can change the information available to the LLM by adjusting the following settings:
+
+
+
+ }>
+ System Prompt
+
+
+ setEditSystemPrompt(e.target.value)}
+ onKeyDown={handleKeyPress}
+ placeholder="Enter the new system prompt.."
+ id="SystemPromptInput"
+ />
+
+
+
+
+
+
+
+ }>
+ Tunables
+
+
+ setMessageHistoryLength(e.target.value)}
+ slotProps={{
+ htmlInput: {
+ min: 0
+ },
+ inputLabel: {
+ shrink: true,
+ },
+ }}
+ />
+
+
+
+ }>
+ Tools
+
+
+ These tools can be made available to the LLM for obtaining real-time information from the Internet. Each tool's description is the text provided to the LLM, shown here for reference.
+
+
+
+ {
+ tools.map((tool, index) =>
+
+
+ } onChange={() => toggle("tool", index)} label={tool?.function?.name} />
+ {tool?.function?.description}
+
+ )
+ }
+
+
+
+ }>
+ RAG
+
+
+ These RAG databases can be enabled or disabled to supply additional context based on the chat request.
+
+
+
+ {
+ rags.map((rag, index) =>
+
+
+ } onChange={() => toggle("rag", index)} label={rag?.name} />
+ {rag?.description}
+
+ )
+ }
+
+
+
+ }>
+ System Information
+
+
+ The server is running on the following hardware:
+
+
+
+
+
+ } onClick={() => { reset(["history"], "History cleared."); }}>Clear Backstory History
+
+
);
+}
+
+
+export type {
+ ControlsParams
+};
+
+export {
+ Controls
+};
\ No newline at end of file
diff --git a/frontend/src/Conversation.tsx b/frontend/src/Conversation.tsx
new file mode 100644
index 0000000..40b5041
--- /dev/null
+++ b/frontend/src/Conversation.tsx
@@ -0,0 +1,446 @@
+import React, { useState, useImperativeHandle, forwardRef, useEffect, useRef, useCallback } from 'react';
+import TextField from '@mui/material/TextField';
+import Typography from '@mui/material/Typography';
+import Tooltip from '@mui/material/Tooltip';
+import Button from '@mui/material/Button';
+import Box from '@mui/material/Box';
+import SendIcon from '@mui/icons-material/Send';
+
+import PropagateLoader from "react-spinners/PropagateLoader";
+
+import { Message, MessageList } from './Message';
+import { SeverityType } from './Snack';
+import { ContextStatus } from './ContextStatus';
+import { MessageData } from './MessageMeta';
+
+const welcomeMarkdown = `
+# Welcome to Backstory
+
+Backstory was written by James Ketrenos to answer questions potential
+employers may have about his work history.
+You can ask things like:
+
+
+
+
+
+
+You can click the text above to submit that query, or type it in yourself (or ask whatever questions you may have).
+
+Backstory is a RAG-enabled expert system with access to real-time data, running self-hosted
+(no cloud) versions of industry-leading Large and Small Language Models (LLMs/SLMs).
+
+As with all LLM interactions, the results may not be 100% accurate. If you have questions about my career, I'd love to hear from you. You can send me an email at **james_backstory@ketrenos.com**.`;
+
+const welcomeMessage: MessageData = {
+ "role": "assistant", "content": welcomeMarkdown
+};
+const loadingMessage: MessageData = { "role": "assistant", "content": "Instancing chat session..." };
+
+type ConversationMode = 'chat' | 'fact-check' | 'system';
+
+interface ConversationHandle {
+ submitQuery: () => void;
+}
+
+interface ConversationProps {
+ type: ConversationMode
+ prompt: string,
+ connectionBase: string,
+ sessionId: string | undefined,
+ setSnack: (message: string, severity: SeverityType) => void,
+};
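+
+// Illustrative typing of the line-delimited JSON updates streamed back from
+// /api/chat/{sessionId} and consumed by sendQuery() below. This is a sketch
+// inferred from the client-side handlers, not the server's actual schema; the
+// 'done' payload additionally carries metadata counters (eval_count,
+// eval_duration, prompt_eval_count, prompt_eval_duration) used for TPS stats.
+type ChatStreamUpdate =
+  | { status: 'processing', message: string }
+  | { status: 'done', message: MessageData }
+  | { status: 'error', message: string };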
+
+const Conversation = forwardRef(({prompt, type, sessionId, setSnack, connectionBase} : ConversationProps, ref) => {
+ const [query, setQuery] = useState("");
+ const [contextUsedPercentage, setContextUsedPercentage] = useState(0);
+ const [processing, setProcessing] = useState(false);
+ const [countdown, setCountdown] = useState(0);
+ const [conversation, setConversation] = useState<MessageList>([]);
+ const timerRef = useRef<any>(null);
+ const [lastEvalTPS, setLastEvalTPS] = useState(35);
+ const [lastPromptTPS, setLastPromptTPS] = useState(430);
+ const [contextStatus, setContextStatus] = useState<ContextStatus>({ context_used: 0, max_context: 0 });
+ const [contextWarningShown, setContextWarningShown] = useState(false);
+
+ // Update the context status
+ const updateContextStatus = useCallback(() => {
+ const fetchContextStatus = async () => {
+ try {
+ const response = await fetch(connectionBase + `/api/context-status/${sessionId}`, {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ });
+
+ if (!response.ok) {
+ throw new Error(`Server responded with ${response.status}: ${response.statusText}`);
+ }
+
+ const data = await response.json();
+ setContextStatus(data);
+ }
+ catch (error) {
+ console.error('Error getting context status:', error);
+ setSnack("Unable to obtain context status.", "error");
+ }
+ };
+ fetchContextStatus();
+ }, [setContextStatus, connectionBase, setSnack, sessionId]);
+
+ // Set the initial chat history to "loading" or the welcome message if loaded.
+ useEffect(() => {
+ if (sessionId === undefined) {
+ setConversation([loadingMessage]);
+ } else {
+ fetch(connectionBase + `/api/history/${sessionId}`, {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ })
+ .then(response => response.json())
+ .then(data => {
+ console.log(`Session id: ${sessionId} -- history returned from server with ${data.length} entries`)
+ setConversation([
+ welcomeMessage,
+ ...data
+ ]);
+ })
+ .catch(error => {
+ console.error('Error fetching chat history:', error);
+ setSnack("Unable to obtain chat history.", "error");
+ });
+ updateContextStatus();
+ }
+ }, [sessionId, setConversation, updateContextStatus, connectionBase, setSnack]);
+
+
+ const isScrolledToBottom = useCallback(()=> {
+ // Current vertical scroll position
+ const scrollTop = window.scrollY || document.documentElement.scrollTop;
+
+ // Total height of the page content
+ const scrollHeight = document.documentElement.scrollHeight;
+
+ // Height of the visible window
+ const clientHeight = document.documentElement.clientHeight;
+
+ // If we're at the bottom (allowing a small buffer of 16px)
+ return scrollTop + clientHeight >= scrollHeight - 16;
+ }, []);
+
+ const scrollToBottom = useCallback(() => {
+ console.log("Scroll to bottom");
+ window.scrollTo({
+ top: document.body.scrollHeight,
+ });
+ }, []);
+
+
+ const startCountdown = (seconds: number) => {
+ if (timerRef.current) clearInterval(timerRef.current);
+ setCountdown(seconds);
+ timerRef.current = setInterval(() => {
+ setCountdown((prev) => {
+ if (prev <= 1) {
+ clearInterval(timerRef.current);
+ timerRef.current = null;
+ if (isScrolledToBottom()) {
+ setTimeout(() => {
+ scrollToBottom();
+ }, 50)
+ }
+ return 0;
+ }
+ return prev - 1;
+ });
+ }, 1000);
+ };
+
+ const submitQuery = (text: string) => {
+ sendQuery(text);
+ }
+
+ const stopCountdown = () => {
+ if (timerRef.current) {
+ clearInterval(timerRef.current);
+ timerRef.current = null;
+ setCountdown(0);
+ }
+ };
+
+ const handleKeyPress = (event: any) => {
+ if (event.key === 'Enter') {
+ switch (event.target.id) {
+ case 'QueryInput':
+ sendQuery(query);
+ break;
+ }
+ }
+ };
+
+ useImperativeHandle(ref, () => ({
+ submitQuery: () => {
+ sendQuery(query);
+ }
+ }));
+
+ // If context status changes, show a warning if necessary. If it drops
+ // back below the threshold, clear the warning trigger
+ useEffect(() => {
+ const context_used_percentage = Math.round(100 * contextStatus.context_used / contextStatus.max_context);
+ if (context_used_percentage >= 90 && !contextWarningShown) {
+ setSnack(`${context_used_percentage}% of context used. You may wish to start a new chat.`, "warning");
+ setContextWarningShown(true);
+ }
+ if (context_used_percentage < 90 && contextWarningShown) {
+ setContextWarningShown(false);
+ }
+ setContextUsedPercentage(context_used_percentage)
+ }, [contextStatus, setContextWarningShown, contextWarningShown, setContextUsedPercentage, setSnack]);
+
+ const sendQuery = async (query: string) => {
+ if (!query.trim()) return;
+
+ //setTab(0);
+
+ const userMessage: MessageData[] = [{ role: 'user', content: query }];
+
+ let scrolledToBottom;
+
+ // Add user message to conversation
+ const newConversation: MessageList = [
+ ...conversation,
+ ...userMessage
+ ];
+ setConversation(newConversation);
+ scrollToBottom();
+
+ // Clear input
+ setQuery('');
+
+ try {
+ scrolledToBottom = isScrolledToBottom();
+ setProcessing(true);
+ // Create a unique ID for the processing message
+ const processingId = Date.now().toString();
+
+ // Add initial processing message
+ setConversation(prev => [
+ ...prev,
+ { role: 'assistant', content: 'Processing request...', id: processingId, isProcessing: true }
+ ]);
+ if (scrolledToBottom) {
+ setTimeout(() => { scrollToBottom() }, 50);
+ }
+
+ // Make the fetch request with proper headers
+ const response = await fetch(connectionBase + `/api/chat/${sessionId}`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ },
+ body: JSON.stringify({ role: 'user', content: query.trim() }),
+ });
+
+ // We'll guess that the response will be around 500 tokens...
+ const token_guess = 500;
+ const estimate = Math.round(token_guess / lastEvalTPS + contextStatus.context_used / lastPromptTPS);
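+      // Worked example (assumed numbers): with lastEvalTPS = 35 t/s,
+      // lastPromptTPS = 430 t/s, and 4300 tokens of context used:
+      // 500 / 35 + 4300 / 430 ≈ 14.3 + 10 ≈ 24s.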
+
+ scrolledToBottom = isScrolledToBottom();
+ setSnack(`Query sent. Response estimated in ${estimate}s.`, "info");
+ startCountdown(Math.round(estimate));
+ if (scrolledToBottom) {
+ setTimeout(() => { scrollToBottom() }, 50);
+ }
+
+ if (!response.ok) {
+ throw new Error(`Server responded with ${response.status}: ${response.statusText}`);
+ }
+
+ if (!response.body) {
+ throw new Error('Response body is null');
+ }
+
+ // Set up stream processing with explicit chunking
+ const reader = response.body.getReader();
+ const decoder = new TextDecoder();
+ let buffer = '';
+
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) {
+ break;
+ }
+
+ const chunk = decoder.decode(value, { stream: true });
+
+ // Process each complete line immediately
+ buffer += chunk;
+ let lines = buffer.split('\n');
+ buffer = lines.pop() || ''; // Keep incomplete line in buffer
+ for (const line of lines) {
+ if (!line.trim()) continue;
+
+ try {
+ const update = JSON.parse(line);
+
+ // Force an immediate state update based on the message type
+ if (update.status === 'processing') {
+ scrolledToBottom = isScrolledToBottom();
+ // Update processing message with immediate re-render
+ setConversation(prev => prev.map(msg =>
+ msg.id === processingId
+ ? { ...msg, content: update.message }
+ : msg
+ ));
+ if (scrolledToBottom) {
+ setTimeout(() => { scrollToBottom() }, 50);
+ }
+
+ // Add a small delay to ensure React has time to update the UI
+ await new Promise(resolve => setTimeout(resolve, 0));
+
+ } else if (update.status === 'done') {
+ // Replace processing message with final result
+ scrolledToBottom = isScrolledToBottom();
+ setConversation(prev => [
+ ...prev.filter(msg => msg.id !== processingId),
+ update.message
+ ]);
+ const metadata = update.message.metadata;
+ const evalTPS = metadata.eval_count * 10 ** 9 / metadata.eval_duration;
+ const promptTPS = metadata.prompt_eval_count * 10 ** 9 / metadata.prompt_eval_duration;
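+            // The *_duration fields arrive in nanoseconds (Ollama-style
+            // metadata, an assumption here), so count * 10 ** 9 / duration
+            // yields tokens per second.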
+ setLastEvalTPS(evalTPS ? evalTPS : 35);
+ setLastPromptTPS(promptTPS ? promptTPS : 430); // fall back to the initial 430 t/s estimate
+ updateContextStatus();
+ if (scrolledToBottom) {
+ setTimeout(() => { scrollToBottom() }, 50);
+ }
+ } else if (update.status === 'error') {
+ // Show error
+ scrolledToBottom = isScrolledToBottom();
+ setConversation(prev => [
+ ...prev.filter(msg => msg.id !== processingId),
+ { role: 'assistant', type: 'error', content: update.message }
+ ]);
+ if (scrolledToBottom) {
+ setTimeout(() => { scrollToBottom() }, 50);
+ }
+ }
+ } catch (e) {
+ setSnack("Error processing query", "error")
+ console.error('Error parsing JSON:', e, line);
+ }
+ }
+ }
+
+ // Process any remaining buffer content
+ if (buffer.trim()) {
+ try {
+ const update = JSON.parse(buffer);
+
+ if (update.status === 'done') {
+ scrolledToBottom = isScrolledToBottom();
+ setConversation(prev => [
+ ...prev.filter(msg => msg.id !== processingId),
+ update.message
+ ]);
+ if (scrolledToBottom) {
+ setTimeout(() => { scrollToBottom() }, 500);
+ }
+ }
+ } catch (e) {
+ setSnack("Error processing query", "error")
+ }
+ }
+
+ scrolledToBottom = isScrolledToBottom();
+ stopCountdown();
+ setProcessing(false);
+ if (scrolledToBottom) {
+ setTimeout(() => { scrollToBottom() }, 50);
+ }
+ } catch (error) {
+ console.error('Fetch error:', error);
+ setSnack("Unable to process query", "error");
+ scrolledToBottom = isScrolledToBottom();
+ setConversation(prev => [
+ ...prev.filter(msg => !msg.isProcessing),
+ { role: 'assistant', type: 'error', content: `Error: ${error}` }
+ ]);
+ setProcessing(false);
+ stopCountdown();
+ if (scrolledToBottom) {
+ setTimeout(() => { scrollToBottom() }, 50);
+ }
+ }
+ };
+
+ return (
+
+
+ {conversation.map((message, index) => <Message key={index} message={message} submitQuery={submitQuery} />)}
+
+
+ {processing === true && countdown > 0 && (
+ Estimated response time: {countdown}s
+ )}
+
+
+ Context used: {contextUsedPercentage}% {contextStatus.context_used}/{contextStatus.max_context}
+ {
+ contextUsedPercentage >= 90 ? WARNING: Context almost exhausted. You should start a new chat.
+ : (contextUsedPercentage >= 50 ? NOTE: Context is getting long. Queries will be slower, and the LLM may stop issuing tool calls.
+ : <></>)
+ }
+
+
+
+ setQuery(e.target.value)}
+ onKeyDown={handleKeyPress}
+ placeholder="Enter your question..."
+ id="QueryInput"
+ />
+
+
+
+
+
+ );
+});
+
+export type {
+ ConversationProps,
+ ConversationHandle
+};
+
+export {
+ Conversation
+};
\ No newline at end of file
diff --git a/frontend/src/DocumentViewer.tsx b/frontend/src/DocumentViewer.tsx
index 2825b57..49fa643 100644
--- a/frontend/src/DocumentViewer.tsx
+++ b/frontend/src/DocumentViewer.tsx
@@ -80,8 +80,8 @@ const DocumentViewer: React.FC = ({
/**
* Trigger resume generation and update UI state
*/
- const triggerGeneration = useCallback((jobDescription: string | undefined) => {
- if (jobDescription === undefined) {
+ const triggerGeneration = useCallback((description: string | undefined) => {
+ if (description === undefined) {
setProcessing(undefined);
setResume(undefined);
setActiveTab(0);
@@ -89,7 +89,7 @@ const DocumentViewer: React.FC = ({
}
setProcessing("resume");
setTimeout(() => { setActiveTab(1); }, 250); // Switch to resume view on mobile
- generateResume(jobDescription);
+ generateResume(description);
}, [generateResume, setProcessing, setActiveTab, setResume]);
/**
@@ -108,6 +108,10 @@ const DocumentViewer: React.FC = ({
setTimeout(() => { setActiveTab(2); }, 250); // Switch to resume view on mobile
}, [factCheck, setResume, setProcessing, setActiveTab, setFacts]);
+ useEffect(() => {
+ setEditJobDescription(jobDescription);
+ }, [jobDescription, setEditJobDescription]);
+
/**
* Switch to resume tab when resume become available
*/
@@ -157,10 +161,10 @@ const DocumentViewer: React.FC = ({
};
const renderJobDescriptionView = () => {
- const jobDescription = [];
+ const children = [];
if (resume === undefined && processing === undefined) {
- jobDescription.push(
+ children.push(
= ({
);
} else {
- jobDescription.push({editJobDescription})
+ children.push({editJobDescription})
}
- jobDescription.push(
+ children.push(
= ({
);
- return jobDescription;
+ return children;
}
/**
@@ -421,7 +425,7 @@ const ResumeActionCard: React.FC = ({ resume, processing,
{resume !== undefined || processing === "resume" ? (
- NOTE: As with all LLMs, hallucination is always a possibility. If the generated resume seems too good to be true, Fact Check or, expand the LLM information for this query section (at the end of the resume) and click the links in the Top RAG matches to view the relavent RAG source document to read the details. Or go back to 'Backstory' and ask a question.
+ NOTE: As with all LLMs, hallucination is always a possibility. Click Fact Check to have the LLM analyze the generated resume vs. the actual resume.
) : (
diff --git a/frontend/src/Message.tsx b/frontend/src/Message.tsx
index 7581414..5134bfd 100644
--- a/frontend/src/Message.tsx
+++ b/frontend/src/Message.tsx
@@ -1,12 +1,15 @@
-import { useState } from 'react';
+import { useState, useRef } from 'react';
import Box from '@mui/material/Box';
import Button from '@mui/material/Button';
+import IconButton from '@mui/material/IconButton';
import CardContent from '@mui/material/CardContent';
import CardActions from '@mui/material/CardActions';
import Collapse from '@mui/material/Collapse';
import Typography from '@mui/material/Typography';
import ExpandMoreIcon from '@mui/icons-material/ExpandMore';
import { ExpandMore } from './ExpandMore';
+import ContentCopyIcon from '@mui/icons-material/ContentCopy';
+import CheckIcon from '@mui/icons-material/Check';
import { MessageData, MessageMeta } from './MessageMeta';
import { ChatBubble } from './ChatBubble';
@@ -38,6 +41,19 @@ const ChatQuery = ({ text, submitQuery }: ChatQueryInterface) => {
const Message = ({ message, submitQuery, isFullWidth }: MessageInterface) => {
const [expanded, setExpanded] = useState(false);
+ const [copied, setCopied] = useState(false);
+ const textFieldRef = useRef(null);
+
+ const handleCopy = () => {
+ if (message === undefined || message.content === undefined) {
+ return;
+ }
+
+ navigator.clipboard.writeText(message.content.trim()).then(() => {
+ setCopied(true);
+ setTimeout(() => setCopied(false), 2000); // Reset after 2 seconds
+ });
+ };
const handleExpandClick = () => {
setExpanded(!expanded);
@@ -47,15 +63,43 @@ const Message = ({ message, submitQuery, isFullWidth }: MessageInterface) => {
return (<></>);
}
+ if (message.content === undefined) {
+ console.info("Message content is undefined");
+ return (<></>);
+ }
+
const formattedContent = message.content.trim();
return (
-
-
+
+
+
+ {copied ? <CheckIcon /> : <ContentCopyIcon />}
+
+
{message.role !== 'user' ?
-
+
:
-
+
{message.content}
}
@@ -86,6 +130,7 @@ export type {
MessageInterface,
MessageList,
};
+
export {
Message,
ChatQuery,
diff --git a/frontend/src/MessageMeta.tsx b/frontend/src/MessageMeta.tsx
index 102ef99..01da20a 100644
--- a/frontend/src/MessageMeta.tsx
+++ b/frontend/src/MessageMeta.tsx
@@ -100,7 +100,7 @@ const MessageMeta = ({ metadata }: MessageMetaInterface) => {
}
{
- metadata.rag.name !== undefined &&
+ metadata?.rag?.name !== undefined &&
}>
diff --git a/frontend/src/ResumeBuilder.tsx b/frontend/src/ResumeBuilder.tsx
index 93064e9..0deefb5 100644
--- a/frontend/src/ResumeBuilder.tsx
+++ b/frontend/src/ResumeBuilder.tsx
@@ -1,13 +1,11 @@
-import { useState, useCallback, } from 'react';
+import { useState, useCallback, useEffect } from 'react';
import Box from '@mui/material/Box';
import { SeverityType } from './Snack';
import { ContextStatus } from './ContextStatus';
-import { MessageData } from './MessageMeta';
+import { MessageData, MessageMetadata } from './MessageMeta';
import { DocumentViewer } from './DocumentViewer';
interface ResumeBuilderProps {
- scrollToBottom: () => void,
- isScrolledToBottom: () => boolean,
setProcessing: (processing: boolean) => void,
processing: boolean,
connectionBase: string,
@@ -17,14 +15,20 @@ interface ResumeBuilderProps {
setResume: (resume: MessageData | undefined) => void,
facts: MessageData | undefined,
setFacts: (facts: MessageData | undefined) => void,
- jobDescription: string | undefined,
- setJobDescription: (jobDescription: string | undefined) => void
};
-const ResumeBuilder = ({ jobDescription, setJobDescription, facts, setFacts, resume, setResume, setProcessing, processing, connectionBase, sessionId, setSnack }: ResumeBuilderProps) => {
+type Resume = {
+ resume: MessageData | undefined,
+ fact_check: MessageData | undefined,
+ job_description: string,
+ metadata: MessageMetadata
+};
+
+const ResumeBuilder = ({ facts, setFacts, resume, setResume, setProcessing, processing, connectionBase, sessionId, setSnack }: ResumeBuilderProps) => {
const [lastEvalTPS, setLastEvalTPS] = useState(35);
const [lastPromptTPS, setLastPromptTPS] = useState(430);
+ const [contextStatus, setContextStatus] = useState<ContextStatus>({ context_used: 0, max_context: 0 });
+ const [jobDescription, setJobDescription] = useState<string | undefined>(undefined);
const updateContextStatus = useCallback(() => {
fetch(connectionBase + `/api/context-status/${sessionId}`, {
@@ -43,6 +47,49 @@ const ResumeBuilder = ({ jobDescription, setJobDescription, facts, setFacts, res
});
}, [setContextStatus, connectionBase, setSnack, sessionId]);
+ // If the jobDescription and resume have not been set, fetch them from the server
+ useEffect(() => {
+ if (sessionId === undefined) {
+ return;
+ }
+ if (jobDescription !== undefined) {
+ return;
+ }
+ const fetchResume = async () => {
+ try {
+ // Make the fetch request with proper headers
+ const response = await fetch(connectionBase + `/api/resume/${sessionId}`, {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ },
+ });
+ if (!response.ok) {
+ throw Error();
+ }
+ const data: Resume[] = await response.json();
+ if (data.length) {
+ const lastResume = data[data.length - 1];
+ console.log(lastResume);
+ setJobDescription(lastResume['job_description']);
+ setResume(lastResume.resume);
+ if (lastResume['fact_check'] !== undefined && lastResume['fact_check'] !== null) {
+ lastResume['fact_check'].role = 'info';
+ setFacts(lastResume['fact_check'])
+ } else {
+ setFacts(undefined)
+ }
+ }
+ } catch (error: any) {
+ setSnack("Unable to fetch resume", "error");
+ console.error(error);
+ }
+ }
+
+ fetchResume();
+ }, [sessionId, resume, jobDescription, setResume, setJobDescription, setSnack, setFacts, connectionBase]);
+
// const startCountdown = (seconds: number) => {
// if (timerRef.current) clearInterval(timerRef.current);
// setCountdown(seconds);
@@ -75,8 +122,8 @@ const ResumeBuilder = ({ jobDescription, setJobDescription, facts, setFacts, res
return (<></>);
}
- const generateResume = async (jobDescription: string) => {
- if (!jobDescription.trim()) return;
+ const generateResume = async (description: string) => {
+ if (!description.trim()) return;
setResume(undefined);
setFacts(undefined);
@@ -93,7 +140,7 @@ const ResumeBuilder = ({ jobDescription, setJobDescription, facts, setFacts, res
'Content-Type': 'application/json',
'Accept': 'application/json',
},
- body: JSON.stringify({ content: jobDescription.trim() }),
+ body: JSON.stringify({ content: description.trim() }),
});
// We'll guess that the response will be around 500 tokens...
diff --git a/frontend/src/StyledMarkdown.tsx b/frontend/src/StyledMarkdown.tsx
index 8343a23..30bf6eb 100644
--- a/frontend/src/StyledMarkdown.tsx
+++ b/frontend/src/StyledMarkdown.tsx
@@ -5,12 +5,13 @@ import { Link } from '@mui/material';
import { ChatQuery } from './Message';
interface StyledMarkdownProps {
+ className?: string,
content: string,
submitQuery?: (query: string) => void,
[key: string]: any, // For any additional props
};
-const StyledMarkdown: React.FC = ({ content, submitQuery, ...props }) => {
+const StyledMarkdown: React.FC = ({ className, content, submitQuery, ...props }) => {
const theme = useTheme();
let options: any = {
@@ -42,7 +43,7 @@ const StyledMarkdown: React.FC = ({ content, submitQuery, .
};
}
- return ;
+ return ;
};
export { StyledMarkdown };
\ No newline at end of file
diff --git a/frontend/src/VectorVisualizer.tsx b/frontend/src/VectorVisualizer.tsx
index b913a99..7d4b749 100644
--- a/frontend/src/VectorVisualizer.tsx
+++ b/frontend/src/VectorVisualizer.tsx
@@ -307,7 +307,7 @@ const VectorVisualizer: React.FC = ({ setSnack, connectio
{ queryEmbedding !== undefined &&
-
+
Query: {queryEmbedding.query}
diff --git a/src/server.py b/src/server.py
index ff7df30..4478ec8 100644
--- a/src/server.py
+++ b/src/server.py
@@ -136,7 +136,17 @@ DEFAULT_HISTORY_LENGTH=5
# %%
# Globals
+NAME = "James Ketrenos"
context_tag = "INFO"
+
+resume_intro = f"""
+As an AI/ML professional specializing in creating custom solutions for new problem domains, {NAME} develops
+custom language model applications that streamline information processing and content generation. This tailored
+resume was created using a Retrieval-Augmented Generation system he built to efficiently match his relevant
+experience with your specific needs, demonstrating both his technical capabilities and his commitment to
+intelligent resource optimization.
+"""
+
system_message = f"""
Launched on {DateTime()}.
@@ -163,16 +173,20 @@ When answering queries, follow these steps:
3. Use the [JOB DESCRIPTION] provided to guide the focus, tone, and relevant skills or experience to highlight from the [WORK HISTORY].
4. Identify and emphasize the experiences, achievements, and responsibilities from the [WORK HISTORY] that best align with the [JOB DESCRIPTION].
5. Do not claim skills from the [JOB DESCRIPTION] unless they are listed in [WORK HISTORY].
+6. Do not include any information unless it is provided in [WORK HISTORY] or [INTRO].
+7. Use the [INTRO] to highlight the use of AI in generating this resume.
+8. Use the [WORK HISTORY] to create a polished, professional resume.
+9. Do not list any locations in the resume.
Structure the resume professionally with the following sections where applicable:
* "Name: Use full name."
-* "Professional Summary: A 2-4 sentence overview tailored to the job."
+* "Professional Summary: A 2-4 sentence overview tailored to the job, using [INTRO] to highlight the use of AI in generating this resume."
* "Skills: A bullet list of key skills derived from the work history and relevant to the job."
* "Professional Experience: A detailed list of roles, achievements, and responsibilities from the work history that relate to the job."
* "Education: Include only if available in the work history."
-Do not include any information unless it is provided in [WORK HISTORY].
+Do not include any information unless it is provided in [WORK HISTORY] or [INTRO].
Ensure the language is clear, concise, and aligned with industry standards for professional resumes.
"""
@@ -372,9 +386,18 @@ class WebServer:
self.file_watcher = None
self.observer = None
+ self.ssl_enabled = os.path.exists(defines.key_path) and os.path.exists(defines.cert_path)
+
+ if self.ssl_enabled:
+ allow_origins=["https://battle-linux.ketrenos.com:3000"]
+ else:
+ allow_origins=["http://battle-linux.ketrenos.com:3000"]
+
+ logging.info(f"Allowed origins: {allow_origins}")
+
self.app.add_middleware(
CORSMiddleware,
- allow_origins=["http://battle-linux.ketrenos.com:3000"],
+ allow_origins=allow_origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
@@ -1053,14 +1076,15 @@ class WebServer:
if chroma_results:
rag_docs.extend(chroma_results["documents"])
metadata["rag"] = { "name": rag["name"], **chroma_results }
- preamble = f"The current time is {DateTime()}\n"
- preamble = f"""[WORK HISTORY]:\n"""
+ preamble = f"[INTRO]\n{resume_intro}\n[/INTRO]\n"
+ preamble += f"""[WORK HISTORY]:\n"""
for doc in rag_docs:
preamble += f"{doc}\n"
resume["rag"] += f"{doc}\n"
preamble += f"\n[/WORK HISTORY]\n"
- content = f"{preamble}\nUse the above WORK HISTORY to create the resume for this JOB DESCRIPTION. Do not use the JOB DESCRIPTION skills as skills the user posseses unless listed in WORK HISTORY:\n[JOB DESCRIPTION]\n{content}\n[/JOB DESCRIPTION]\n"
+ content = f"""{preamble}\n
+ Use the above [WORK HISTORY] and [INTRO] to create the resume for this [JOB DESCRIPTION]. Do not present skills from the [JOB DESCRIPTION] as the candidate's own unless the [WORK HISTORY] mentions them:\n[JOB DESCRIPTION]\n{content}\n[/JOB DESCRIPTION]\n"""
try:
# Estimate token length of new messages
@@ -1152,7 +1176,22 @@ class WebServer:
def run(self, host="0.0.0.0", port=WEB_PORT, **kwargs):
try:
- uvicorn.run(self.app, host=host, port=port)
+ if self.ssl_enabled:
+ logging.info(f"Starting web server at https://{host}:{port}")
+ uvicorn.run(
+ self.app,
+ host=host,
+ port=port,
+ ssl_keyfile=defines.key_path,
+ ssl_certfile=defines.cert_path
+ )
+ else:
+ logging.info(f"Starting web server at http://{host}:{port}")
+ uvicorn.run(
+ self.app,
+ host=host,
+ port=port
+ )
except KeyboardInterrupt:
if self.observer:
self.observer.stop()
@@ -1181,7 +1220,6 @@ def main():
# print(f"Vectorstore created with {collection.count()} documents")
web_server = WebServer(logging, client, model)
- logging.info(f"Starting web server at http://{args.web_host}:{args.web_port}")
web_server.run(host=args.web_host, port=args.web_port, use_reloader=False)
diff --git a/src/utils/defines.py b/src/utils/defines.py
index 939d2fd..8a24f8c 100644
--- a/src/utils/defines.py
+++ b/src/utils/defines.py
@@ -11,4 +11,7 @@ max_context = 2048*8*2
doc_dir = "/opt/backstory/docs/"
session_dir = "/opt/backstory/sessions"
static_content = '/opt/backstory/frontend/deployed'
-resume_doc = '/opt/backstory/docs/resume/generic.txt'
\ No newline at end of file
+resume_doc = '/opt/backstory/docs/resume/generic.txt'
+# Only used for testing; backstory-prod will not use this
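+# A self-signed pair for local testing can be generated with, e.g.:
+#   openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -nodes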
+key_path = '/opt/backstory/src/key.pem'
+cert_path = '/opt/backstory/src/cert.pem'
\ No newline at end of file