Chat is working again, just not saving

commit 11447b68aa (parent 02a278736e)
James Ketr, 2025-05-29 14:15:21 -07:00
15 changed files with 475 additions and 522 deletions

View File

@ -44,6 +44,7 @@ module.exports = {
},
webpack: {
configure: (webpackConfig) => {
webpackConfig.devtool = 'source-map';
// Add .ts and .tsx to resolve.extensions
webpackConfig.resolve.extensions = [
...webpackConfig.resolve.extensions,
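For context, a sketch of the complete craco override this hunk edits; the truncated array tail is assumed to append '.ts' and '.tsx' as the comment states, and craco expects the mutated config to be returned:

    webpack: {
        configure: (webpackConfig) => {
            webpackConfig.devtool = 'source-map';   // emit full source maps for debugging
            webpackConfig.resolve.extensions = [
                ...webpackConfig.resolve.extensions,
                '.ts',
                '.tsx',
            ];
            return webpackConfig;
        },
    },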

View File

@ -8,20 +8,20 @@ import CancelIcon from '@mui/icons-material/Cancel';
import { SxProps, Theme } from '@mui/material';
import PropagateLoader from "react-spinners/PropagateLoader";
import { Message, MessageRoles } from './Message';
import { Message } from './Message';
import { DeleteConfirmation } from 'components/DeleteConfirmation';
import { BackstoryTextField, BackstoryTextFieldRef } from 'components/BackstoryTextField';
import { BackstoryElementProps } from './BackstoryTab';
import { connectionBase } from 'utils/Global';
import { useUser } from "hooks/useUser";
import { StreamingResponse } from 'types/api-client';
import { ChatMessage, ChatContext, ChatSession, ChatQuery } from 'types/types';
import { ChatMessage, ChatMessageBase, ChatContext, ChatSession, ChatQuery } from 'types/types';
import { PaginatedResponse } from 'types/conversion';
import './Conversation.css';
const defaultMessage: ChatMessage = {
status: "thinking", sender: "system", sessionId: "", timestamp: new Date(), content: ""
type: "preparing", status: "done", sender: "system", sessionId: "", timestamp: new Date(), content: ""
};
const loadingMessage: ChatMessage = { ...defaultMessage, content: "Establishing connection with server..." };
@ -249,6 +249,7 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>((props: C
...conversationRef.current,
{
...defaultMessage,
type: 'user',
sender: 'user',
content: query.prompt,
}
@ -259,44 +260,44 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>((props: C
);
controllerRef.current = apiClient.sendMessageStream(sessionId, query, {
onComplete: (msg) => {
console.log(msg);
switch (msg.status) {
case "done":
case "partial":
setConversation([
...conversationRef.current, {
...msg,
role: 'assistant',
origin: type,
}] as ChatMessage[]);
if (msg.status === "done") {
setStreamingMessage(undefined);
setProcessingMessage(undefined);
setProcessing(false);
controllerRef.current = null;
}
if (onResponse) {
onResponse(msg);
}
break;
case "error":
// Show error
setConversation([
...conversationRef.current,
msg
]);
setProcessingMessage(msg);
setProcessing(false);
controllerRef.current = null;
break;
default:
setProcessingMessage(msg);
break;
onMessage: (msg) => {
console.log("onMessage:", msg);
if (msg.type === "response") {
setConversation([
...conversationRef.current,
msg
]);
setStreamingMessage(undefined);
setProcessingMessage(undefined);
setProcessing(false);
} else {
setProcessingMessage(msg);
}
if (onResponse) {
onResponse(msg);
}
},
onPartialMessage: (chunk) => {
setStreamingMessage({ ...defaultMessage, status: "streaming", content: chunk });
onError: (error: string | ChatMessageBase) => {
console.log("onError:", error);
// Type-guard to determine if this is a ChatMessageBase or a string
if (typeof error === "object" && error !== null && "content" in error) {
setProcessingMessage(error as ChatMessage);
setProcessing(false);
controllerRef.current = null;
} else {
setProcessingMessage({ ...defaultMessage, content: error as string });
}
},
onStreaming: (chunk) => {
console.log("onStreaming:", chunk);
setStreamingMessage({ ...defaultMessage, ...chunk });
},
onStatusChange: (status) => {
console.log("onStatusChange:", status);
},
onComplete: () => {
console.log("onComplete");
controllerRef.current = null;
}
});
};
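The onError branch above narrows string | ChatMessageBase with an inline check; a reusable type guard would let TypeScript do the narrowing itself. A sketch, assuming (as the inline check does) that a message object always exposes a content field:

    const isChatMessageBase = (e: string | ChatMessageBase): e is ChatMessageBase =>
        typeof e === 'object' && e !== null && 'content' in e;

    // usage: if (isChatMessageBase(error)) { setProcessingMessage(error as ChatMessage); ... }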

View File

@ -32,68 +32,9 @@ import { SetSnackType } from './Snack';
import { CopyBubble } from './CopyBubble';
import { Scrollable } from './Scrollable';
import { BackstoryElementProps } from './BackstoryTab';
import { ChatMessage, ChatSession } from 'types/types';
type MessageRoles =
'assistant' |
'content' |
'error' |
'fact-check' |
'info' |
'job-description' |
'job-requirements' |
'processing' |
'qualifications' |
'resume' |
'status' |
'streaming' |
'system' |
'thinking' |
'user';
type BackstoryMessage = {
// Only two required fields
role: MessageRoles,
content: string,
// Rest are optional
prompt?: string;
preamble?: {};
status?: string;
remaining_time?: number;
full_content?: string;
response?: string; // Set when status === 'done', 'partial', or 'error'
chunk?: string; // Used when status === 'streaming'
timestamp?: number;
disableCopy?: boolean,
user?: string,
title?: string,
origin?: string,
display?: string, /* Messages generated on the server for filler should not be shown */
id?: string,
isProcessing?: boolean,
actions?: string[],
metadata?: MessageMetaData,
expanded?: boolean,
expandable?: boolean,
};
interface ChatBubbleProps {
role: MessageRoles,
isInfo?: boolean;
children: React.ReactNode;
sx?: SxProps<Theme>;
className?: string;
title?: string;
expanded?: boolean;
expandable?: boolean;
onExpand?: (open: boolean) => void;
}
function ChatBubble(props: ChatBubbleProps) {
const { role, children, sx, className, title, onExpand, expandable, expanded } = props;
const theme = useTheme();
import { ChatMessage, ChatSession, ChatMessageType } from 'types/types';
const getStyle = (theme: Theme, type: ChatMessageType): any => {
const defaultRadius = '16px';
const defaultStyle = {
padding: theme.spacing(1, 2),
@ -115,7 +56,7 @@ function ChatBubble(props: ChatBubbleProps) {
};
const styles: any = {
assistant: {
response: {
...defaultStyle,
backgroundColor: theme.palette.primary.main,
border: `1px solid ${theme.palette.secondary.main}`,
@ -184,7 +125,7 @@ function ChatBubble(props: ChatBubbleProps) {
opacity: 0.9,
transition: 'opacity 0.3s ease-in-out',
},
streaming: 'assistant',
streaming: 'response',
system: {
...defaultStyle,
backgroundColor: '#EDEAE0',
@ -214,102 +155,32 @@ function ChatBubble(props: ChatBubbleProps) {
}
}
return styles[type];
}
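Note that styles.streaming above is the string 'response' rather than a style object, so a bare styles[type] lookup returns that string for streaming messages. Either the elided part of this hunk resolves the alias, or a one-hop dereference like the following is needed (a sketch, not code from the diff):

    let style = styles[type];
    if (typeof style === 'string') {
        style = styles[style];   // follow the alias, e.g. streaming -> response
    }
    return style;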
const getIcon = (messageType: string): React.ReactNode | null => {
const icons: any = {
error: <ErrorOutline color="error" />,
generating: <LocationSearchingIcon />,
info: <InfoOutline color="info" />,
preparing: <LocationSearchingIcon />,
processing: <LocationSearchingIcon />,
searching: <Memory />,
system: <Memory />,
thinking: <Psychology />,
tooling: <LocationSearchingIcon />,
};
// Render Accordion for expandable content
if (expandable || title) {
// Determine if Accordion is controlled
const isControlled = typeof expanded === 'boolean' && typeof onExpand === 'function';
return (
<Accordion
expanded={isControlled ? expanded : undefined} // Omit expanded prop for uncontrolled
defaultExpanded={expanded} // Default to collapsed for uncontrolled Accordion
className={className}
onChange={(_event, newExpanded) => {
if (isControlled && onExpand) {
onExpand(newExpanded); // Call onExpand with new state
}
}}
sx={{ ...styles[role], ...sx }}
>
<AccordionSummary
expandIcon={<ExpandMoreIcon />}
slotProps={{
content: {
sx: {
fontWeight: 'bold',
fontSize: '1.1rem',
m: 0,
p: 0,
display: 'flex',
justifyItems: 'center',
},
},
}}
>
{title || ''}
</AccordionSummary>
<AccordionDetails sx={{ mt: 0, mb: 0, p: 0, pl: 2, pr: 2 }}>
{children}
</AccordionDetails>
</Accordion>
);
}
// Render non-expandable content
return (
<Box
className={className}
sx={{
...(role in styles ? styles[role] : styles['status']),
gap: 1,
display: 'flex',
...sx,
flexDirection: 'row',
}}
>
{icons[role] !== undefined && icons[role]}
<Box sx={{ p: 0, m: 0, gap: 0, display: 'flex', flexGrow: 1, flexDirection: 'column' }}>
{children}
</Box>
</Box>
);
return icons[messageType] || null;
}
interface MessageMetaData {
query?: {
query_embedding: number[];
vector_embedding: number[];
},
origin: string,
rag: any[],
tools?: {
tool_calls: any[],
},
eval_count: number,
eval_duration: number,
prompt_eval_count: number,
prompt_eval_duration: number,
connectionBase: string,
setSnack: SetSnackType,
}
type MessageList = BackstoryMessage[];
interface MessageProps extends BackstoryElementProps {
sx?: SxProps<Theme>,
message: ChatMessage,
title?: string,
chatSession?: ChatSession,
className?: string,
sx?: SxProps<Theme>,
expandable?: boolean,
expanded?: boolean,
onExpand?: (open: boolean) => void,
className?: string,
chatSession?: ChatSession,
};
interface MessageMetaProps {
@ -317,7 +188,6 @@ interface MessageMetaProps {
messageProps: MessageProps
};
const MessageMeta = (props: MessageMetaProps) => {
const {
/* MessageData */
@ -447,102 +317,124 @@ const MessageMeta = (props: MessageMetaProps) => {
</>);
};
interface MessageContainerProps {
type: ChatMessageType,
metadataView?: React.ReactNode | null,
messageView?: React.ReactNode | null,
sx?: SxProps<Theme>,
copyContent?: string,
};
const MessageContainer = (props: MessageContainerProps) => {
const { type, sx, messageView, metadataView, copyContent } = props;
const icon = getIcon(type);
return <Box
className={`Message Message-${type}`}
sx={{
display: "flex",
flexDirection: "column",
m: 0,
mt: 1,
marginBottom: "0px !important", // Remove whitespace from expanded Accordion
gap: 1,
...sx,
}}>
<Box sx={{ display: "flex", flexDirection: 'row' }}>
{icon !== null && icon}
{messageView}
</Box>
{metadataView}
{copyContent && <CopyBubble content={copyContent} />}
</Box>;
};
const Message = (props: MessageProps) => {
const { message, submitQuery, sx, className, chatSession, onExpand, setSnack, expanded } = props;
const { message, title, submitQuery, sx, className, chatSession, onExpand, setSnack, expanded, expandable } = props;
const [metaExpanded, setMetaExpanded] = useState<boolean>(false);
const textFieldRef = useRef(null);
const backstoryProps = {
submitQuery,
setSnack
};
const theme = useTheme();
const style: any = getStyle(theme, message.type);
const handleMetaExpandClick = () => {
setMetaExpanded(!metaExpanded);
};
if (message === undefined) {
return (<></>);
}
const content = message.content?.trim();
if (!content) {
return (<></>)
};
if (message.content === undefined) {
console.info("Message content is undefined");
return (<></>);
}
const formattedContent = message.content.trim();
if (formattedContent === "") {
return (<></>);
}
return (
<ChatBubble
role='assistant'
className={`${className || ""} Message Message-${message.sender}`}
{...message}
expanded={expanded}
onExpand={onExpand}
sx={{
display: "flex",
flexDirection: "column",
pb: message.metadata ? 0 : "8px",
m: 0,
mt: 1,
marginBottom: "0px !important", // Remove whitespace from expanded Accordion
// overflowX: "auto"
...sx,
}}>
<CardContent ref={textFieldRef} sx={{ position: "relative", display: "flex", flexDirection: "column", overflowX: "auto", m: 0, p: 0, paddingBottom: '0px !important' }}>
<Scrollable
className="MessageContent"
autoscroll
fallbackThreshold={0.5}
sx={{
p: 0,
m: 0,
// maxHeight: (message.role === "streaming") ? "20rem" : "unset",
display: "flex",
flexGrow: 1,
overflow: "auto", /* Handles scrolling for the div */
}}
>
<StyledMarkdown chatSession={chatSession} streaming={message.status === "streaming"} content={formattedContent} {...backstoryProps} />
</Scrollable>
</CardContent>
<CardActions disableSpacing sx={{ display: "flex", flexDirection: "row", justifyContent: "space-between", alignItems: "center", width: "100%", p: 0, m: 0 }}>
{/*(message.disableCopy === undefined || message.disableCopy === false) &&*/ <CopyBubble content={message.content} />}
{message.metadata && (
<Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
<Button variant="text" onClick={handleMetaExpandClick} sx={{ color: "darkgrey", p: 0 }}>
LLM information for this query
</Button>
<ExpandMore
expand={metaExpanded}
onClick={handleMetaExpandClick}
aria-expanded={true /*message.expanded*/}
aria-label="show more"
>
<ExpandMoreIcon />
</ExpandMore>
</Box>
)}
</CardActions>
{message.metadata && <>
<Collapse in={metaExpanded} timeout="auto" unmountOnExit>
<CardContent>
<MessageMeta messageProps={props} metadata={message.metadata} />
</CardContent>
</Collapse>
</>}
</ChatBubble>
const messageView = (
<StyledMarkdown chatSession={chatSession} streaming={message.status === "streaming"} content={content} {...backstoryProps} />
);
};
let metadataView = (<></>);
if (message.metadata) {
metadataView = (<>
<Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
<Button variant="text" onClick={handleMetaExpandClick} sx={{ color: "darkgrey", p: 0 }}>
LLM information for this query
</Button>
<ExpandMore
expand={metaExpanded}
onClick={handleMetaExpandClick}
aria-expanded={true /*message.expanded*/}
aria-label="show more">
<ExpandMoreIcon />
</ExpandMore>
</Box>
<Collapse in={metaExpanded} timeout="auto" unmountOnExit>
<CardContent>
<MessageMeta messageProps={props} metadata={message.metadata} />
</CardContent>
</Collapse>
</>);
}
if (!expandable) {
/* When not expandable, the styles are applied directly to MessageContainer */
return (<>
{messageView && <MessageContainer type={message.type} {...{ messageView, metadataView }} sx={{ ...style, ...sx }} />}
</>);
}
// Determine if Accordion is controlled
const isControlled = typeof expanded === 'boolean' && typeof onExpand === 'function';
return (
<Accordion
expanded={isControlled ? expanded : undefined} // Omit expanded prop for uncontrolled
defaultExpanded={expanded} // Default to collapsed for uncontrolled Accordion
className={className}
onChange={(_event, newExpanded) => { isControlled && onExpand && onExpand(newExpanded) }}
sx={{ ...sx, ...style }}>
<AccordionSummary
expandIcon={<ExpandMoreIcon />}
slotProps={{
content: {
sx: {
display: 'flex',
justifyItems: 'center',
m: 0, p: 0,
fontWeight: 'bold',
fontSize: '1.1rem',
},
},
}}>
{title || ''}
</AccordionSummary>
<AccordionDetails sx={{ mt: 0, mb: 0, p: 0, pl: 2, pr: 2 }}>
<MessageContainer type={message.type} {...{ messageView, metadataView }} />
</AccordionDetails>
</Accordion>
);
}
export type {
MessageProps,
MessageList,
BackstoryMessage,
MessageMetaData,
MessageRoles,
MessageProps,
};
export {

View File

@ -17,7 +17,7 @@ const ChatPage = forwardRef<ConversationHandle, BackstoryPageProps>((props: Back
const isMobile = useMediaQuery(theme.breakpoints.down('md'));
const [questions, setQuestions] = useState<React.ReactElement[]>([]);
console.log("ChatPage candidate =>", candidate);
// console.log("ChatPage candidate =>", candidate);
useEffect(() => {
if (!candidate) {
return;

View File

@ -19,7 +19,7 @@ import { StyledMarkdown } from 'components/StyledMarkdown';
import { Scrollable } from '../components/Scrollable';
import { Pulse } from 'components/Pulse';
import { StreamingResponse } from 'types/api-client';
import { ChatContext, ChatSession, ChatQuery } from 'types/types';
import { ChatContext, ChatMessage, ChatMessageBase, ChatSession, ChatQuery } from 'types/types';
import { useUser } from 'hooks/useUser';
const emptyUser: Candidate = {
@ -102,21 +102,30 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
setShouldGenerateProfile(false); // Reset the flag
const streamResponse = apiClient.sendMessageStream(sessionId, query, {
onPartialMessage: (content, messageId) => {
console.log('Partial content:', content);
onMessage: (chatMessage: ChatMessage) => {
console.log('Message:', chatMessage);
// Update UI with the completed message
},
onStatusChange: (status) => {
console.log('Status changed:', status);
// Update UI status indicator
},
onComplete: (finalMessage) => {
console.log('Final message:', finalMessage.content);
// Handle completed message
onComplete: () => {
console.log('Content complete');
},
onError: (error) => {
console.error('Streaming error:', error);
// Handle error
onWarn: (warning) => {
console.log("Warning:", warning);
},
onError: (error: string | ChatMessageBase) => {
// Type-guard to determine if this is a ChatMessageBase or a string
if (typeof error === "object" && error !== null && "content" in error) {
console.log("Error message:", error);
} else {
console.log("Error string:", error);
}
},
onStreaming: (chunk) => {
console.log("Streaming: ", chunk);
}
});
// controllerRef.current = streamQueryResponse({

View File

@ -6,6 +6,7 @@ import { ChatMessage } from 'types/types';
const LoadingPage = (props: BackstoryPageProps) => {
const preamble: ChatMessage = {
sender: 'system',
type: 'preparing',
status: 'done',
sessionId: '',
content: 'Please wait while connecting to Backstory...',

View File

@ -7,11 +7,9 @@ import {
import { SxProps } from '@mui/material';
import { BackstoryQuery } from 'components/BackstoryQuery';
import { MessageList, BackstoryMessage } from 'components/Message';
import { Conversation } from 'components/Conversation';
import { BackstoryPageProps } from 'components/BackstoryTab';
import { ChatQuery } from "types/types";
import { ChatQuery, ChatMessage } from "types/types";
import './ResumeBuilderPage.css';
/**
@ -58,19 +56,19 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
factsConversationRef.current?.submitQuery(query);
};
const filterJobDescriptionMessages = useCallback((messages: MessageList): MessageList => {
const filterJobDescriptionMessages = useCallback((messages: ChatMessage[]): ChatMessage[] => {
if (messages === undefined || messages.length === 0) {
return [];
}
if (messages.length > 0) {
messages[0].role = 'content';
messages[0].title = 'Job Description';
messages[0].disableCopy = false;
messages[0].expandable = true;
// messages[0].role = 'content';
// messages[0].title = 'Job Description';
// messages[0].disableCopy = false;
// messages[0].expandable = true;
}
if (-1 !== messages.findIndex(m => m.status === 'done' || (m.actions && m.actions.includes("resume_generated")))) {
if (-1 !== messages.findIndex(m => m.status === 'done')) { // || (m.actions && m.actions.includes("resume_generated")))) {
setHasResume(true);
setHasFacts(true);
}
@ -85,11 +83,11 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
if (messages.length > 3) {
// messages[2] is Show job requirements
messages[3].role = 'job-requirements';
messages[3].title = 'Job Requirements';
messages[3].disableCopy = false;
messages[3].expanded = false;
messages[3].expandable = true;
// messages[3].role = 'job-requirements';
// messages[3].title = 'Job Requirements';
// messages[3].disableCopy = false;
// messages[3].expanded = false;
// messages[3].expandable = true;
}
/* Filter out the 2nd and 3rd (0-based) */
@ -99,7 +97,7 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
return filtered;
}, [setHasResume, setHasFacts]);
const filterResumeMessages = useCallback((messages: MessageList): MessageList => {
const filterResumeMessages = useCallback((messages: ChatMessage[]): ChatMessage[] => {
if (messages === undefined || messages.length === 0) {
return [];
}
@ -108,20 +106,20 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
if (messages.length > 1) {
// messages[0] is Show Qualifications
messages[1].role = 'qualifications';
messages[1].title = 'Candidate qualifications';
messages[1].disableCopy = false;
messages[1].expanded = false;
messages[1].expandable = true;
// messages[1].role = 'qualifications';
// messages[1].title = 'Candidate qualifications';
// messages[1].disableCopy = false;
// messages[1].expanded = false;
// messages[1].expandable = true;
}
if (messages.length > 3) {
// messages[2] is Show Resume
messages[3].role = 'resume';
messages[3].title = 'Generated Resume';
messages[3].disableCopy = false;
messages[3].expanded = true;
messages[3].expandable = true;
// messages[3].role = 'resume';
// messages[3].title = 'Generated Resume';
// messages[3].disableCopy = false;
// messages[3].expanded = true;
// messages[3].expandable = true;
}
/* Filter out the 1st and 3rd messages (0-based) */
@ -130,18 +128,18 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
return filtered;
}, []);
const filterFactsMessages = useCallback((messages: MessageList): MessageList => {
const filterFactsMessages = useCallback((messages: ChatMessage[]): ChatMessage[] => {
if (messages === undefined || messages.length === 0) {
return [];
}
if (messages.length > 1) {
// messages[0] is Show verification
messages[1].role = 'fact-check';
messages[1].title = 'Fact Check';
messages[1].disableCopy = false;
messages[1].expanded = true;
messages[1].expandable = true;
// messages[1].role = 'fact-check';
// messages[1].title = 'Fact Check';
// messages[1].disableCopy = false;
// messages[1].expanded = true;
// messages[1].expandable = true;
}
/* Filter out the 1st (0-based) */
@ -150,33 +148,33 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
return filtered;
}, []);
const jobResponse = useCallback(async (message: BackstoryMessage) => {
if (message.actions && message.actions.includes("job_description")) {
if (jobConversationRef.current) {
await jobConversationRef.current.fetchHistory();
}
}
if (message.actions && message.actions.includes("resume_generated")) {
if (resumeConversationRef.current) {
await resumeConversationRef.current.fetchHistory();
}
setHasResume(true);
setActiveTab(1); // Switch to Resume tab
}
if (message.actions && message.actions.includes("facts_checked")) {
if (factsConversationRef.current) {
await factsConversationRef.current.fetchHistory();
}
setHasFacts(true);
}
const jobResponse = useCallback(async (message: ChatMessage) => {
// if (message.actions && message.actions.includes("job_description")) {
// if (jobConversationRef.current) {
// await jobConversationRef.current.fetchHistory();
// }
// }
// if (message.actions && message.actions.includes("resume_generated")) {
// if (resumeConversationRef.current) {
// await resumeConversationRef.current.fetchHistory();
// }
// setHasResume(true);
// setActiveTab(1); // Switch to Resume tab
// }
// if (message.actions && message.actions.includes("facts_checked")) {
// if (factsConversationRef.current) {
// await factsConversationRef.current.fetchHistory();
// }
// setHasFacts(true);
// }
}, [setHasFacts, setHasResume, setActiveTab]);
const resumeResponse = useCallback((message: BackstoryMessage): void => {
const resumeResponse = useCallback((message: ChatMessage): void => {
console.log('onResumeResponse', message);
setHasFacts(true);
}, [setHasFacts]);
const factsResponse = useCallback((message: BackstoryMessage): void => {
const factsResponse = useCallback((message: ChatMessage): void => {
console.log('onFactsResponse', message);
}, []);
@ -207,7 +205,7 @@ const ResumeBuilderPage: React.FC<BackstoryPageProps> = (props: BackstoryPagePro
// </Box>,
// ];
// const jobDescriptionPreamble: MessageList = [{
// const jobDescriptionPreamble: ChatMessage[] = [{
// role: 'info',
// content: `Once you paste a job description and press **Generate Resume**, Backstory will perform the following actions:

View File

@ -26,29 +26,19 @@ import {
// ============================
interface StreamingOptions {
onMessage?: (message: Types.ChatMessage) => void;
onPartialMessage?: (partialContent: string, messageId?: string) => void;
onComplete?: (finalMessage: Types.ChatMessage) => void;
onError?: (error: Error) => void;
onStatusChange?: (status: Types.ChatStatusType) => void;
onMessage?: (message: Types.ChatMessage) => void;
onStreaming?: (chunk: Types.ChatMessageBase) => void;
onComplete?: () => void;
onError?: (error: string | Types.ChatMessageBase) => void;
onWarn?: (warning: string) => void;
signal?: AbortSignal;
}
interface StreamingResponse {
messageId: string;
cancel: () => void;
promise: Promise<Types.ChatMessage>;
}
interface ChatMessageChunk {
id?: string;
sessionId: string;
status: Types.ChatStatusType;
sender: Types.ChatSenderType;
content: string;
isPartial?: boolean;
timestamp: Date;
metadata?: Record<string, any>;
promise: Promise<Types.ChatMessage[]>;
}
// ============================
@ -87,7 +77,6 @@ class ApiClient {
}
async logout(accessToken: string, refreshToken: string): Promise<Types.ApiResponse> {
console.log(this.defaultHeaders);
const response = await fetch(`${this.baseUrl}/auth/logout`, {
method: 'POST',
headers: this.defaultHeaders,
@ -348,10 +337,8 @@ class ApiClient {
const signal = options.signal || abortController.signal;
let messageId = '';
let accumulatedContent = '';
let currentMessage: Partial<Types.ChatMessage> = {};
const promise = new Promise<Types.ChatMessage>(async (resolve, reject) => {
const promise = new Promise<Types.ChatMessage[]>(async (resolve, reject) => {
try {
const response = await fetch(`${this.baseUrl}/chat/sessions/${sessionId}/messages/stream`, {
method: 'POST',
@ -374,78 +361,65 @@ class ApiClient {
}
const decoder = new TextDecoder();
let buffer = '';
let chatMessage: Types.ChatMessage | null = null;
const chatMessageList : Types.ChatMessage[] = [];
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
if (done) {
// Stream ended naturally - create final message
break;
}
const chunk = decoder.decode(value, { stream: true });
const lines = chunk.split('\n');
buffer += decoder.decode(value, { stream: true });
// Process complete lines
const lines = buffer.split('\n');
buffer = lines.pop() || ''; // Keep incomplete line in buffer
for (const line of lines) {
if (line.trim() === '') continue;
if (line.trim() === '') continue; // Skip blank lines between SSEs
try {
// Handle Server-Sent Events format
if (line.startsWith('data: ')) {
const data = line.slice(6);
const data = line.slice(5).trim();
const incoming: Types.ChatMessageBase = JSON.parse(data);
if (data === '[DONE]') {
// Stream completed
const finalMessage: Types.ChatMessage = {
id: messageId,
sessionId,
status: 'done',
sender: currentMessage.sender || 'ai',
content: accumulatedContent,
timestamp: currentMessage.timestamp || new Date(),
...currentMessage
};
// Trigger callbacks based on status
if (incoming.status !== chatMessage?.status) {
options.onStatusChange?.(incoming.status);
}
options.onComplete?.(finalMessage);
resolve(finalMessage);
return;
// Handle different status types
switch (incoming.status) {
case 'streaming':
if (chatMessage === null) {
chatMessage = {...incoming};
} else {
// Can't do a simple += as TypeScript thinks .content might not be there
chatMessage.content = (chatMessage?.content || '') + incoming.content;
}
options.onStreaming?.(incoming);
break;
case 'error':
options.onError?.(incoming);
break;
default:
chatMessageList.push(incoming);
options.onMessage?.(incoming);
break;
}
const messageChunk: ChatMessageChunk = JSON.parse(data);
// Update accumulated state
if (messageChunk.id) messageId = messageChunk.id;
if (messageChunk.content) {
accumulatedContent += messageChunk.content;
}
// Update current message properties
Object.assign(currentMessage, {
...messageChunk,
content: accumulatedContent
});
// Trigger callbacks
if (messageChunk.status) {
options.onStatusChange?.(messageChunk.status);
}
if (messageChunk.isPartial) {
options.onPartialMessage?.(messageChunk.content, messageId);
}
const currentCompleteMessage: Types.ChatMessage = {
id: messageId,
sessionId,
status: messageChunk.status,
sender: messageChunk.sender,
content: accumulatedContent,
timestamp: messageChunk.timestamp,
...currentMessage
};
options.onMessage?.(currentCompleteMessage);
}
} catch (parseError) {
console.warn('Failed to parse SSE chunk:', parseError);
} catch (error) {
console.warn('Failed to process SSE:', error);
if (error instanceof Error) {
options.onWarn?.(error.message);
}
// Continue processing other lines
}
}
@ -454,25 +428,15 @@ class ApiClient {
reader.releaseLock();
}
// If we get here without a [DONE] signal, create final message
const finalMessage: Types.ChatMessage = {
id: messageId || `msg_${Date.now()}`,
sessionId,
status: 'done',
sender: currentMessage.sender || 'ai',
content: accumulatedContent,
timestamp: currentMessage.timestamp || new Date(),
...currentMessage
};
options.onComplete?.(finalMessage);
resolve(finalMessage);
options.onComplete?.();
resolve(chatMessageList);
} catch (error) {
if (signal.aborted) {
options.onComplete?.();
reject(new Error('Request was aborted'));
} else {
options.onError?.(error as Error);
options.onError?.((error as Error).message);
options.onComplete?.();
reject(error);
}
}
@ -492,15 +456,15 @@ class ApiClient {
sessionId: string,
query: Types.ChatQuery,
options?: StreamingOptions
): Promise<Types.ChatMessage> {
): Promise<Types.ChatMessage[]> {
// If streaming options are provided, use streaming
if (options && (options.onMessage || options.onPartialMessage || options.onStatusChange)) {
if (options && (options.onMessage || options.onStreaming || options.onStatusChange)) {
const streamResponse = this.sendMessageStream(sessionId, query, options);
return streamResponse.promise;
}
// Otherwise, use standard response
return this.sendMessage(sessionId, query);
return [await this.sendMessage(sessionId, query)];
}
async getChatMessages(sessionId: string, request: Partial<PaginatedRequest> = {}): Promise<PaginatedResponse<Types.ChatMessage>> {
@ -737,4 +701,4 @@ await apiClient.sendMessageAuto(sessionId, 'Quick question'); // Will use standa
*/
export { ApiClient }
export type { StreamingOptions, StreamingResponse, ChatMessageChunk };
export type { StreamingOptions, StreamingResponse };
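The read loop above is the crux of the fix: SSE frames arrive as "data: <json>" lines separated by blank lines, and a network read can end mid-line, so the incomplete tail must be buffered across reads. The same framing logic, condensed (helper name hypothetical):

    const drainSSEBuffer = (buffer: string, onData: (json: string) => void): string => {
        const lines = buffer.split('\n');
        const rest = lines.pop() || '';             // incomplete line stays buffered
        for (const line of lines) {
            if (line.startsWith('data: ')) {
                onData(line.slice(5).trim());       // strip the "data:" prefix
            }
        }
        return rest;                                // prepend to the next chunk
    };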

View File

@ -1,6 +1,6 @@
// Generated TypeScript types from Pydantic models
// Source: src/backend/models.py
// Generated on: 2025-05-29T05:47:25.809967
// Generated on: 2025-05-29T21:15:06.572082
// DO NOT EDIT MANUALLY - This file is auto-generated
// ============================
@ -15,9 +15,11 @@ export type ApplicationStatus = "applied" | "reviewing" | "interview" | "offer"
export type ChatContextType = "job_search" | "candidate_screening" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile";
export type ChatSenderType = "user" | "ai" | "system";
export type ChatMessageType = "error" | "generating" | "info" | "preparing" | "processing" | "response" | "searching" | "system" | "thinking" | "tooling" | "user";
export type ChatStatusType = "preparing" | "thinking" | "partial" | "streaming" | "done" | "error";
export type ChatSenderType = "user" | "assistant" | "system";
export type ChatStatusType = "initializing" | "streaming" | "done" | "error";
export type ColorBlindMode = "protanopia" | "deuteranopia" | "tritanopia" | "none";
@ -231,17 +233,26 @@ export interface ChatContext {
export interface ChatMessage {
id?: string;
sessionId: string;
status: "preparing" | "thinking" | "partial" | "streaming" | "done" | "error";
sender: "user" | "ai" | "system";
senderId?: string;
prompt?: string;
content?: string;
chunk?: string;
status: "initializing" | "streaming" | "done" | "error";
type: "error" | "generating" | "info" | "preparing" | "processing" | "response" | "searching" | "system" | "thinking" | "tooling" | "user";
sender: "user" | "assistant" | "system";
timestamp: Date;
isEdited?: boolean;
content?: string;
metadata?: ChatMessageMetaData;
}
export interface ChatMessageBase {
id?: string;
sessionId: string;
senderId?: string;
status: "initializing" | "streaming" | "done" | "error";
type: "error" | "generating" | "info" | "preparing" | "processing" | "response" | "searching" | "system" | "thinking" | "tooling" | "user";
sender: "user" | "assistant" | "system";
timestamp: Date;
content?: string;
}
export interface ChatMessageMetaData {
model?: "qwen2.5" | "flux-schnell";
temperature?: number;
@ -261,6 +272,17 @@ export interface ChatMessageMetaData {
timers?: Record<string, number>;
}
export interface ChatMessageUser {
id?: string;
sessionId: string;
senderId?: string;
status: "initializing" | "streaming" | "done" | "error";
type?: "error" | "generating" | "info" | "preparing" | "processing" | "response" | "searching" | "system" | "thinking" | "tooling" | "user";
sender: "user" | "assistant" | "system";
timestamp: Date;
content?: string;
}
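Because the regenerated unions are closed, a switch over ChatStatusType can be made exhaustive; the compiler then flags any status a future regeneration adds. A sketch (the display strings are invented):

    const describeStatus = (s: ChatStatusType): string => {
        switch (s) {
            case 'initializing': return 'Connecting...';
            case 'streaming': return 'Receiving...';
            case 'done': return 'Complete';
            case 'error': return 'Failed';
            default: {
                const unreachable: never = s;  // compile error if a case is missed
                return unreachable;
            }
        }
    };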
export interface ChatOptions {
seed?: number;
numCtx?: number;

View File

@ -20,8 +20,9 @@
"noEmit": true,
"jsx": "react-jsx",
"baseUrl": "src",
"sourceMap": true
},
"include": [
"src/**/*"
]
"include": [
"src/**/*"
]
}

View File

@ -21,12 +21,13 @@ import asyncio
from datetime import datetime, UTC
from prometheus_client import Counter, Summary, CollectorRegistry # type: ignore
from models import ( ChatQuery, ChatMessage, Tunables, ChatStatusType, ChatMessageMetaData)
from models import ( ChatQuery, ChatMessage, ChatOptions, ChatMessageBase, ChatMessageUser, Tunables, ChatMessageType, ChatSenderType, ChatStatusType, ChatMessageMetaData)
from logger import logger
import defines
from .registry import agent_registry
from metrics import Metrics
from database import RedisDatabase # type: ignore
import model_cast
class LLMMessage(BaseModel):
role: str = Field(default="")
@ -342,15 +343,25 @@ class Agent(BaseModel, ABC):
async def generate(
self, llm: Any, model: str, query: ChatQuery, session_id: str, user_id: str, temperature=0.7
) -> AsyncGenerator[ChatMessage, None]:
) -> AsyncGenerator[ChatMessage | ChatMessageBase, None]:
logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
user_message = ChatMessageUser(
session_id=session_id,
tunables=query.tunables,
type=ChatMessageType.USER,
status=ChatStatusType.DONE,
sender=ChatSenderType.USER,
content=query.prompt.strip(),
timestamp=datetime.now(UTC)
)
chat_message = ChatMessage(
session_id=session_id,
prompt=query.prompt,
tunables=query.tunables,
status=ChatStatusType.PREPARING,
sender="user",
status=ChatStatusType.INITIALIZING,
type=ChatMessageType.PREPARING,
sender=ChatSenderType.ASSISTANT,
content="",
timestamp=datetime.now(UTC)
)
@ -361,28 +372,22 @@ class Agent(BaseModel, ABC):
messages: List[LLMMessage] = [
LLMMessage(role="system", content=self.system_prompt)
]
messages.extend(
[
item
for m in self.conversation
for item in [
LLMMessage(role="user", content=m.prompt.strip() if m.prompt else ""),
LLMMessage(role="assistant", content=m.response.strip()),
]
]
)
messages.extend([
LLMMessage(role=m.sender, content=m.content.strip())
for m in self.conversation
])
# Only the actual user query is provided with the full context message
messages.append(
LLMMessage(role="user", content=query.prompt.strip())
LLMMessage(role=user_message.sender, content=user_message.content.strip())
)
# message.messages = messages
chat_message.metadata = ChatMessageMetaData()
chat_message.metadata.options = {
"seed": 8911,
"num_ctx": self.context_size,
"temperature": temperature, # Higher temperature to encourage tool usage
}
chat_message.metadata.options = ChatOptions(
seed=8911,
num_ctx=self.context_size,
temperature=temperature, # Higher temperature to encourage tool usage
)
# Create a dict for storing various timing stats
chat_message.metadata.timers = {}
@ -488,17 +493,21 @@ class Agent(BaseModel, ABC):
# return
# not use_tools
chat_message.status = ChatStatusType.THINKING
chat_message.type = ChatMessageType.THINKING
chat_message.content = f"Generating response..."
yield chat_message
# Reset the response for streaming
chat_message.content = ""
start_time = time.perf_counter()
chat_message.type = ChatMessageType.GENERATING
chat_message.status = ChatStatusType.STREAMING
for response in llm.chat(
model=model,
messages=messages,
options={
**chat_message.metadata.options,
**chat_message.metadata.options.model_dump(exclude_unset=True),
},
stream=True,
):
@ -508,12 +517,13 @@ class Agent(BaseModel, ABC):
yield chat_message
return
chat_message.status = ChatStatusType.STREAMING
chat_message.chunk = response.message.content
chat_message.content += chat_message.chunk
chat_message.content += response.message.content
if not response.done:
chat_chunk = model_cast.cast_to_model(ChatMessageBase, chat_message)
chat_chunk.content = response.message.content # the chunk carries only this delta
yield chat_chunk
continue
if response.done:
self.collect_metrics(response)
@ -524,12 +534,15 @@ class Agent(BaseModel, ABC):
self.context_tokens = (
response.prompt_eval_count + response.eval_count
)
chat_message.type = ChatMessageType.RESPONSE
chat_message.status = ChatStatusType.DONE
yield chat_message
end_time = time.perf_counter()
chat_message.metadata.timers["streamed"] = end_time - start_time
chat_message.status = ChatStatusType.DONE
# Add the user and chat messages to the conversation
self.conversation.append(user_message)
self.conversation.append(chat_message)
return
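For reference, a sketch of how a caller drains this generator, mirroring the streaming endpoint below; llm, model, render and save are placeholders, not names from the diff:

    async for message in agent.generate(
        llm=llm, model=model, query=query,
        session_id=session_id, user_id=user_id,
    ):
        if message.status == ChatStatusType.STREAMING:
            render(message.content)   # per-chunk ChatMessageBase
        elif message.status == ChatStatusType.DONE:
            save(message)             # full ChatMessage, metadata included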

View File

@ -31,12 +31,13 @@ from models import (
Job, JobApplication, ApplicationStatus,
# Chat models
ChatSession, ChatMessage, ChatContext, ChatQuery,
ChatSession, ChatMessage, ChatContext, ChatQuery, ChatStatusType, ChatMessageBase,
# Supporting models
Location, Skill, WorkExperience, Education
)
import model_cast
import defines
import agents
from logger import logger
@ -343,7 +344,7 @@ async def login(
expiresAt=int((datetime.now(UTC) + timedelta(hours=24)).timestamp())
)
return create_success_response(auth_response.model_dump(by_alias=True))
return create_success_response(auth_response.model_dump(by_alias=True, exclude_unset=True))
except Exception as e:
logger.error(f"⚠️ Login error: {e}")
@ -531,7 +532,7 @@ async def refresh_token_endpoint(
expiresAt=int((datetime.now(UTC) + timedelta(hours=24)).timestamp())
)
return create_success_response(auth_response.model_dump(by_alias=True))
return create_success_response(auth_response.model_dump(by_alias=True, exclude_unset=True))
except jwt.PyJWTError:
return JSONResponse(
@ -578,7 +579,7 @@ async def create_candidate(
"type": "candidate"
})
return create_success_response(candidate.model_dump(by_alias=True))
return create_success_response(candidate.model_dump(by_alias=True, exclude_unset=True))
except Exception as e:
logger.error(f"Candidate creation error: {e}")
@ -614,7 +615,7 @@ async def get_candidate(
)
candidate = Candidate.model_validate(candidates_list[0])
return create_success_response(candidate.model_dump(by_alias=True))
return create_success_response(candidate.model_dump(by_alias=True, exclude_unset=True))
except Exception as e:
logger.error(f"Get candidate error: {e}")
@ -656,7 +657,7 @@ async def update_candidate(
updated_candidate = Candidate.model_validate(candidate_dict)
await database.set_candidate(candidate_id, updated_candidate.model_dump())
return create_success_response(updated_candidate.model_dump(by_alias=True))
return create_success_response(updated_candidate.model_dump(by_alias=True, exclude_unset=True))
except Exception as e:
logger.error(f"Update candidate error: {e}")
@ -690,7 +691,7 @@ async def get_candidates(
)
paginated_response = create_paginated_response(
[c.model_dump(by_alias=True) for c in paginated_candidates],
[c.model_dump(by_alias=True, exclude_unset=True) for c in paginated_candidates],
page, limit, total
)
@ -739,7 +740,7 @@ async def search_candidates(
)
paginated_response = create_paginated_response(
[c.model_dump(by_alias=True) for c in paginated_candidates],
[c.model_dump(by_alias=True, exclude_unset=True) for c in paginated_candidates],
page, limit, total
)
@ -781,7 +782,7 @@ async def create_job(
job = Job.model_validate(job_data)
await database.set_job(job.id, job.model_dump())
return create_success_response(job.model_dump(by_alias=True))
return create_success_response(job.model_dump(by_alias=True, exclude_unset=True))
except Exception as e:
logger.error(f"Job creation error: {e}")
@ -809,7 +810,7 @@ async def get_job(
await database.set_job(job_id, job_data)
job = Job.model_validate(job_data)
return create_success_response(job.model_dump(by_alias=True))
return create_success_response(job.model_dump(by_alias=True, exclude_unset=True))
except Exception as e:
logger.error(f"Get job error: {e}")
@ -842,7 +843,7 @@ async def get_jobs(
)
paginated_response = create_paginated_response(
[j.model_dump(by_alias=True) for j in paginated_jobs],
[j.model_dump(by_alias=True, exclude_unset=True) for j in paginated_jobs],
page, limit, total
)
@ -887,7 +888,7 @@ async def search_jobs(
)
paginated_response = create_paginated_response(
[j.model_dump(by_alias=True) for j in paginated_jobs],
[j.model_dump(by_alias=True, exclude_unset=True) for j in paginated_jobs],
page, limit, total
)
@ -921,7 +922,7 @@ async def create_chat_session(
await database.set_chat_session(chat_session.id, chat_session.model_dump())
logger.info(f"✅ Chat session created: {chat_session.id} for user {current_user.id}")
return create_success_response(chat_session.model_dump(by_alias=True))
return create_success_response(chat_session.model_dump(by_alias=True, exclude_unset=True))
except Exception as e:
logger.error(f"Chat session creation error: {e}")
@ -946,7 +947,7 @@ async def get_chat_session(
)
chat_session = ChatSession.model_validate(chat_session_data)
return create_success_response(chat_session.model_dump(by_alias=True))
return create_success_response(chat_session.model_dump(by_alias=True, exclude_unset=True))
except Exception as e:
logger.error(f"Get chat session error: {e}")
@ -986,7 +987,7 @@ async def get_chat_session_messages(
messages_list, page, limit, sortBy, sortOrder, filter_dict
)
paginated_response = create_paginated_response(
[m.model_dump(by_alias=True) for m in paginated_messages],
[m.model_dump(by_alias=True, exclude_unset=True) for m in paginated_messages],
page, limit, total
)
@ -1034,27 +1035,37 @@ async def post_chat_session_message_stream(
)
async def message_stream_generator():
"""Generator to stream messages"""
async for message in chat_agent.generate(
last_log = None
async for chat_message in chat_agent.generate(
llm=llm_manager.get_llm(),
model=defines.model,
query=chat_query,
session_id=session_id,
user_id=current_user.id,
):
json_data = message.model_dump(mode='json', by_alias=True)
# If the message is not done, convert it to a ChatMessageBase to remove
# metadata and other unnecessary fields
if chat_message.status != ChatStatusType.DONE:
chat_message = model_cast.cast_to_model(ChatMessageBase, chat_message)
json_data = chat_message.model_dump(mode='json', by_alias=True, exclude_unset=True)
json_str = json.dumps(json_data)
logger.info(f"🔗 Streaming message for session {session_id}: {json_str}")
yield json_str + "\n"
log = f"🔗 Message status={chat_message.status}, type={chat_message.type}"
if last_log != log:
last_log = log
logger.info(log)
yield f"data: {json_str}\n\n"
return StreamingResponse(
message_stream_generator(),
media_type="application/json",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no", # Prevents Nginx buffering if you're using it
},
)
message_stream_generator(),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
#"Access-Control-Allow-Origin": "*", # CORS
"X-Accel-Buffering": "no", # Prevents Nginx buffering if you're using it
},
)
except Exception as e:
logger.error(traceback.format_exc())
@ -1090,7 +1101,7 @@ async def get_chat_sessions(
)
paginated_response = create_paginated_response(
[s.model_dump(by_alias=True) for s in paginated_sessions],
[s.model_dump(by_alias=True, exclude_unset=True) for s in paginated_sessions],
page, limit, total
)
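The recurring model_dump(by_alias=True, exclude_unset=True) change in this file trims response payloads: exclude_unset drops every field that was never explicitly assigned instead of serializing defaults and Nones. A toy sketch of the difference:

    from typing import Optional
    from pydantic import BaseModel

    class Toy(BaseModel):
        a: int = 1
        b: Optional[str] = None

    Toy(a=2).model_dump()                    # {'a': 2, 'b': None}
    Toy(a=2).model_dump(exclude_unset=True)  # {'a': 2} - b was never set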

src/backend/model_cast.py (new file, +14 lines)
View File

@ -0,0 +1,14 @@
from typing import Type, TypeVar
from pydantic import BaseModel # type: ignore
import copy
T = TypeVar('T', bound=BaseModel)
def cast_to_model(model_cls: Type[T], source: BaseModel) -> T:
# Copy only the fields model_cls declares; values are shared by reference
data = {field: getattr(source, field) for field in model_cls.model_fields}
return model_cls(**data)
def cast_to_model_safe(model_cls: Type[T], source: BaseModel) -> T:
# Same, but deep-copies each value so the new model cannot alias mutable state in the source
data = {field: copy.deepcopy(getattr(source, field)) for field in model_cls.model_fields}
return model_cls(**data)
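Usage as seen in the streaming endpoint above: down-cast a full ChatMessage to the slimmer ChatMessageBase so metadata and other extras never hit the wire:

    chunk = cast_to_model(ChatMessageBase, chat_message)

cast_to_model_safe does the same but deep-copies each value, so later mutation of the source cannot leak into the copy.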

View File

@ -64,13 +64,24 @@ class InterviewRecommendation(str, Enum):
class ChatSenderType(str, Enum):
USER = "user"
AI = "ai"
ASSISTANT = "assistant"
SYSTEM = "system"
class ChatStatusType(str, Enum):
class ChatMessageType(str, Enum):
ERROR = "error"
GENERATING = "generating"
INFO = "info"
PREPARING = "preparing"
PROCESSING = "processing"
RESPONSE = "response"
SEARCHING = "searching"
SYSTEM = "system"
THINKING = "thinking"
PARTIAL = "partial"
TOOLING = "tooling"
USER = "user"
class ChatStatusType(str, Enum):
INITIALIZING = "initializing"
STREAMING = "streaming"
DONE = "done"
ERROR = "error"
@ -572,24 +583,28 @@ class ChatMessageMetaData(BaseModel):
class Config:
populate_by_name = True # Allow both field names and aliases
class ChatMessage(BaseModel):
class ChatMessageBase(BaseModel):
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
session_id: str = Field(..., alias="sessionId")
status: ChatStatusType
sender: ChatSenderType
sender_id: Optional[str] = Field(None, alias="senderId")
prompt: str = ""
content: str = ""
chunk: str = ""
status: ChatStatusType
type: ChatMessageType
sender: ChatSenderType
timestamp: datetime
#attachments: Optional[List[Attachment]] = None
#reactions: Optional[List[MessageReaction]] = None
is_edited: bool = Field(False, alias="isEdited")
#edit_history: Optional[List[EditHistory]] = Field(None, alias="editHistory")
metadata: ChatMessageMetaData = Field(None)
content: str = ""
class Config:
populate_by_name = True # Allow both field names and aliases
class ChatMessageUser(ChatMessageBase):
type: ChatMessageType = ChatMessageType.USER
class ChatMessage(ChatMessageBase):
#attachments: Optional[List[Attachment]] = None
#reactions: Optional[List[MessageReaction]] = None
#is_edited: bool = Field(False, alias="isEdited")
#edit_history: Optional[List[EditHistory]] = Field(None, alias="editHistory")
metadata: ChatMessageMetaData = Field(None)
class ChatSession(BaseModel):
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
user_id: Optional[str] = Field(None, alias="userId")
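With populate_by_name = True, these models accept either the Python field names or the camelCase aliases on input, and by_alias controls the output; a quick sketch using the models above:

    msg = ChatMessageUser(
        sessionId="s1",                  # alias accepted on input
        status=ChatStatusType.DONE,
        sender=ChatSenderType.USER,
        timestamp=datetime.now(UTC),
        content="hello",
    )
    msg.model_dump(by_alias=True)["sessionId"]   # camelCase on the way out
    msg.model_dump()["session_id"]               # snake_case otherwise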

View File

@ -1,19 +1,6 @@
#!/bin/bash
# Ensure input was provided
if [[ -z "$1" ]]; then
TARGET=$(readlink -f "src/server.py")
else
TARGET=$(readlink -f "$1")
fi
# Resolve user-supplied path to absolute path
if [[ ! -f "$TARGET" ]]; then
echo "Target file '$TARGET' not found."
exit 1
fi
get_pid() {
# Loop through python processes and resolve each script path
PID=""
for pid in $(pgrep -f python); do
@ -32,9 +19,33 @@ for pid in $(pgrep -f python); do
fi
fi
done
}
if [[ -z "$1" ]]; then
for file in "src/server.py" "src/backend/main.py"; do
echo "Checking ${file}"
# Ensure input was provided
TARGET=$(readlink -f "$file")
if [[ ! -f "$TARGET" ]]; then
echo "Target file '$TARGET' not found."
continue # try the next candidate instead of aborting the fallback loop
fi
get_pid
if [[ "${PID}" != "" ]]; then
break
fi
done
else
TARGET=$(readlink -f "$1")
if [[ ! -f "$TARGET" ]]; then
echo "Target file '$TARGET' not found."
exit 1
fi
get_pid
fi
if [[ -z "$PID" ]]; then
echo "No Python process found running '$TARGET'."
echo "No Python process found running."
exit 1
fi