Works again (minus most Controls)

commit a29f51ac9b
parent 09c8c45afc
@@ -23,10 +23,10 @@ The backstory about Backstory...
 
 ## Some questions I've been asked
 
-Q. <ChatQuery text="Why aren't you providing this as a Platform As a Service (PaaS) application?"/>
+Q. <ChatQuery prompt="Why aren't you providing this as a Platform As a Service (PaaS) application?" tunables={{ "enable_tools": false }} />
 
 A. I could; but I don't want to store your data. I also don't want to have to be on the hook for support of this service. I like it, it's fun, but it's not what I want as my day-gig, you know? If it was, I wouldn't be looking for a job...
 
-Q. <ChatQuery text="Why can't I just ask Backstory these questions?"/>
+Q. <ChatQuery prompt="Why can't I just ask Backstory these questions?" tunables={{ "enable_tools": false }} />
 
 A. Try it. See what you find out :)
@@ -19,11 +19,12 @@ import { SxProps } from '@mui/material';
 
 
 import { ResumeBuilder } from './ResumeBuilder';
-import { Message, ChatQuery, MessageList } from './Message';
+import { Message, MessageList } from './Message';
 import { Snack, SeverityType } from './Snack';
 import { VectorVisualizer } from './VectorVisualizer';
 import { Controls } from './Controls';
 import { Conversation, ConversationHandle } from './Conversation';
+import { ChatQuery, QueryOptions } from './ChatQuery';
 import { Scrollable } from './AutoScroll';
 import { BackstoryTab } from './BackstoryTab';
 
@@ -112,9 +113,9 @@ const App = () => {
         fetchAbout();
     }, [about, setAbout])
 
-    const handleSubmitChatQuery = (query: string) => {
-        console.log(`handleSubmitChatQuery: ${query} -- `, chatRef.current ? ' sending' : 'no handler');
-        chatRef.current?.submitQuery(query);
+    const handleSubmitChatQuery = (prompt: string, tunables?: QueryOptions) => {
+        console.log(`handleSubmitChatQuery: ${prompt} ${tunables || {}} -- `, chatRef.current ? ' sending' : 'no handler');
+        chatRef.current?.submitQuery(prompt, tunables);
         setActiveTab(0);
     };
 
@@ -137,10 +138,10 @@ const App = () => {
 
     const backstoryQuestions = [
         <Box sx={{ display: "flex", flexDirection: isMobile ? "column" : "row" }}>
-            <ChatQuery text="What is James Ketrenos' work history?" submitQuery={handleSubmitChatQuery} />
-            <ChatQuery text="What programming languages has James used?" submitQuery={handleSubmitChatQuery} />
-            <ChatQuery text="What are James' professional strengths?" submitQuery={handleSubmitChatQuery} />
-            <ChatQuery text="What are today's headlines on CNBC.com?" submitQuery={handleSubmitChatQuery} />
+            <ChatQuery prompt="What is James Ketrenos' work history?" tunables={{ enable_tools: false }} submitQuery={handleSubmitChatQuery} />
+            <ChatQuery prompt="What programming languages has James used?" tunables={{ enable_tools: false }} submitQuery={handleSubmitChatQuery} />
+            <ChatQuery prompt="What are James' professional strengths?" tunables={{ enable_tools: false }} submitQuery={handleSubmitChatQuery} />
+            <ChatQuery prompt="What are today's headlines on CNBC.com?" tunables={{ enable_tools: true, enable_rag: false, enable_context: false }} submitQuery={handleSubmitChatQuery} />
         </Box>,
         <Box sx={{ p: 1 }}>
             <MuiMarkdown>
frontend/src/ChatQuery.tsx (new file, 48 lines)
@@ -0,0 +1,48 @@
+import Box from '@mui/material/Box';
+import Button from '@mui/material/Button';
+
+type QueryOptions = {
+    enable_rag?: boolean,
+    enable_tools?: boolean,
+    enable_context?: boolean,
+};
+
+interface ChatQueryInterface {
+    prompt: string,
+    tunables?: QueryOptions,
+    submitQuery?: (prompt: string, tunables?: QueryOptions) => void
+}
+
+const ChatQuery = (props : ChatQueryInterface) => {
+    const { prompt, submitQuery } = props;
+    let tunables = props.tunables;
+
+    if (typeof (tunables) === "string") {
+        tunables = JSON.parse(tunables);
+    }
+    console.log(tunables);
+
+    if (submitQuery === undefined) {
+        return (<Box>{prompt}</Box>);
+    }
+    return (
+        <Button variant="outlined" sx={{
+            color: theme => theme.palette.custom.highlight, // Golden Ochre (#D4A017)
+            borderColor: theme => theme.palette.custom.highlight,
+            m: 1
+        }}
+            size="small" onClick={(e: any) => { submitQuery(prompt, tunables); }}>
+            {prompt}
+        </Button>
+    );
+}
+
+export type {
+    ChatQueryInterface,
+    QueryOptions,
+};
+
+export {
+    ChatQuery,
+};
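
Note: ChatQuery degrades gracefully — with no submitQuery it renders the prompt as plain text, and when markdown supplies tunables as a string attribute the JSON.parse branch above normalizes it. A minimal usage sketch in TSX (the standalone handler is illustrative; in this commit the real handlers are handleSubmitChatQuery and friends):

    import { ChatQuery, QueryOptions } from './ChatQuery';

    // Illustrative handler with the same shape as App.tsx's handleSubmitChatQuery.
    const submit = (prompt: string, tunables?: QueryOptions) =>
        console.log('submit', prompt, tunables);

    // Renders as an outlined button; clicking forwards prompt and tunables.
    const asButton = (
        <ChatQuery
            prompt="What programming languages has James used?"
            tunables={{ enable_tools: false }}
            submitQuery={submit}
        />
    );

    // Without submitQuery (e.g. static rendering), the prompt is plain text.
    const asText = <ChatQuery prompt="What programming languages has James used?" />;
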
@@ -319,7 +319,7 @@ const Controls = ({ sessionId, setSnack, connectionBase }: ControlsParams) => {
     };
 
     return (<div className="Controls">
-        <Typography component="span" sx={{ mb: 1 }}>
+        {/* <Typography component="span" sx={{ mb: 1 }}>
             You can change the information available to the LLM by adjusting the following settings:
         </Typography>
         <Accordion>
@@ -414,7 +414,8 @@ const Controls = ({ sessionId, setSnack, connectionBase }: ControlsParams) => {
                 )
             }</FormGroup>
         </AccordionActions>
-    </Accordion>
+    </Accordion> */}
+
     <Accordion>
         <AccordionSummary expandIcon={<ExpandMoreIcon />}>
             <Typography component="span">System Information</Typography>
@@ -426,8 +427,9 @@ const Controls = ({ sessionId, setSnack, connectionBase }: ControlsParams) => {
             <SystemInfoComponent systemInfo={systemInfo} />
         </AccordionActions>
     </Accordion>
-    <Button startIcon={<ResetIcon />} onClick={() => { reset(["history"], "History cleared."); }}>Delete Backstory History</Button>
-    <Button onClick={() => { reset(["rags", "tools", "system_prompt", "message_history_length"], "Default settings restored.") }}>Reset system prompt, tunables, and RAG to defaults</Button>
+    {/* <Button startIcon={<ResetIcon />} onClick={() => { reset(["history"], "History cleared."); }}>Delete Backstory History</Button>
+    <Button onClick={() => { reset(["rags", "tools", "system_prompt", "message_history_length"], "Default settings restored.") }}>Reset system prompt, tunables, and RAG to defaults</Button> */}
     </div>);
 }
 
@@ -13,7 +13,7 @@ import { SetSnackType } from './Snack';
 import { ContextStatus } from './ContextStatus';
 import { useAutoScrollToBottom } from './AutoScroll';
 import { DeleteConfirmation } from './DeleteConfirmation';
+import { QueryOptions } from './ChatQuery';
 import './Conversation.css';
 
 const loadingMessage: MessageData = { "role": "status", "content": "Establishing connection with server..." };
@@ -21,7 +21,7 @@ const loadingMessage: MessageData = { "role": "status", "content": "Establishing
 type ConversationMode = 'chat' | 'job_description' | 'resume' | 'fact_check';
 
 interface ConversationHandle {
-    submitQuery: (query: string) => void;
+    submitQuery: (prompt: string, options?: QueryOptions) => void;
 }
 
 interface BackstoryMessage {
@@ -54,6 +54,7 @@ interface ConversationProps {
     setSnack: SetSnackType, // Callback to display snack popups
     defaultPrompts?: React.ReactElement[], // Set of Elements to display after the TextField
     defaultQuery?: string, // Default text to populate the TextField input
+    emptyPrompt?: string, // If input is not shown and an action is taken, send this prompt
     preamble?: MessageList, // Messages to display at start of Conversation until Action has been invoked
     hidePreamble?: boolean, // Whether to hide the preamble after an Action has been invoked
     hideDefaultPrompts?: boolean, // Whether to hide the defaultPrompts after an Action has been invoked
@@ -67,6 +68,7 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>(({
     className,
     type,
     prompt,
+    emptyPrompt,
     actionLabel,
     resetAction,
     multiline,
@@ -256,8 +258,8 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>(({
     };
 
     useImperativeHandle(ref, () => ({
-        submitQuery: (query: string) => {
-            sendQuery(query);
+        submitQuery: (query: string, tunables?: QueryOptions) => {
+            sendQuery(query, tunables);
         }
     }));
 
@@ -303,38 +305,34 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>(({
         }
     };
 
-    const sendQuery = async (query: string) => {
-        query = query.trim();
+    const sendQuery = async (request: string, options?: QueryOptions) => {
+        request = request.trim();
 
         // If the query was empty, a default query was provided,
         // and there is no prompt for the user, send the default query.
-        if (!query && defaultQuery && !prompt) {
-            query = defaultQuery.trim();
+        if (!request && defaultQuery && !prompt) {
+            request = defaultQuery.trim();
         }
 
-        // If the query is empty, and a prompt was provided, do not
-        // send an empty query.
-        if (!query && prompt) {
+        // Do not send an empty query.
+        if (!request) {
             return;
         }
 
         setNoInteractions(false);
 
-        if (query) {
-            setConversation([
-                ...conversationRef.current,
-                {
-                    role: 'user',
-                    origin: type,
-                    content: query,
-                    disableCopy: true
-                }
-            ]);
-        }
+        setConversation([
+            ...conversationRef.current,
+            {
+                role: 'user',
+                origin: type,
+                content: request,
+                disableCopy: true
+            }
+        ]);
 
         // Add a small delay to ensure React has time to update the UI
         await new Promise(resolve => setTimeout(resolve, 0));
-        console.log(conversation);
 
         // Clear input
         setQuery('');
@@ -353,13 +351,25 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>(({
         await new Promise(resolve => setTimeout(resolve, 0));
 
         // Make the fetch request with proper headers
+        let query;
+        if (options) {
+            query = {
+                options: options,
+                prompt: request.trim()
+            }
+        } else {
+            query = {
+                prompt: request.trim()
+            }
+        }
+
         const response = await fetch(connectionBase + `/api/chat/${sessionId}/${type}`, {
             method: 'POST',
             headers: {
                 'Content-Type': 'application/json',
                 'Accept': 'application/json',
             },
-            body: JSON.stringify({ role: 'user', content: query.trim() }),
+            body: JSON.stringify(query)
         });
 
         // We'll guess that the response will be around 500 tokens...
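
Note: the wire format changes in the hunk above — instead of a chat-style { role, content } message, the client now posts the prompt plus optional tunables. A sketch of the two payload shapes sendQuery can produce (prompts taken from App.tsx; values are illustrative):

    // With options attached (the CNBC prompt enables tools, disables RAG/context):
    const withOptions = {
        prompt: "What are today's headlines on CNBC.com?",
        options: { enable_tools: true, enable_rag: false, enable_context: false },
    };

    // Without options, only the prompt is sent:
    const bare = { prompt: "What is James Ketrenos' work history?" };
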
@@ -383,14 +393,14 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>(({
         let buffer = '';
 
         const process_line = async (line: string) => {
-            const update = JSON.parse(line);
+            let update = JSON.parse(line);
 
             switch (update.status) {
                 case 'done':
                     console.log('Done processing:', update);
                     // Replace processing message with final result
                     if (onResponse) {
-                        update.message = onResponse(update);
+                        update = onResponse(update);
                     }
                     setProcessingMessage(undefined);
                     const backstoryMessage: BackstoryMessage = update;
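
Note: a subtle contract change rides along here — onResponse used to return replacement text stored into update.message; it now must return the entire transformed update. A hypothetical conforming handler (only the name resumeResponse appears elsewhere in this commit; the body is assumed):

    // Hypothetical onResponse handler: return the whole update, transformed or not.
    const resumeResponse = (update: BackstoryMessage): BackstoryMessage => {
        return { ...update, origin: 'resume' };
    };
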
@@ -451,6 +461,7 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>(({
                     await process_line(line);
                 } catch (e) {
                     setSnack("Error processing query", "error")
+                    console.error(e);
                 }
             }
         }
@@ -461,6 +472,7 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>(({
                 await process_line(buffer);
             } catch (e) {
                 setSnack("Error processing query", "error")
+                console.error(e);
             }
         }
 
@@ -579,7 +591,7 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>(({
 
 export type {
     ConversationProps,
-    ConversationHandle
+    ConversationHandle,
 };
 
 export {
@@ -76,11 +76,6 @@ interface MessageProps {
     className?: string,
 };
 
-interface ChatQueryInterface {
-    text: string,
-    submitQuery?: (text: string) => void
-}
-
 interface MessageMetaProps {
     metadata: MessageMetaData,
     messageProps: MessageProps
@@ -157,7 +152,7 @@ const MessageMeta = (props: MessageMetaProps) => {
             <Box sx={{ fontSize: "0.75rem", display: "flex", flexDirection: "column", mt: 1, mb: 1, fontWeight: "bold" }}>
                 {tool.name}
             </Box>
-            <JsonView displayDataTypes={false} objectSortKeys={true} collapsed={2} value={JSON.parse(tool.content)} style={{ fontSize: "0.8rem", maxHeight: "20rem", overflow: "auto" }}>
+            <JsonView displayDataTypes={false} objectSortKeys={true} collapsed={1} value={JSON.parse(tool.content)} style={{ fontSize: "0.8rem", maxHeight: "20rem", overflow: "auto" }}>
                 <JsonView.String
                     render={({ children, ...reset }) => {
                         if (typeof (children) === "string" && children.match("\n")) {
@@ -209,7 +204,7 @@ const MessageMeta = (props: MessageMetaProps) => {
                 </Box>
             </AccordionSummary>
             <AccordionDetails>
-                <JsonView displayDataTypes={false} objectSortKeys={true} collapsed={2} value={message} style={{ fontSize: "0.8rem", maxHeight: "20rem", overflow: "auto" }}>
+                <JsonView displayDataTypes={false} objectSortKeys={true} collapsed={1} value={message} style={{ fontSize: "0.8rem", maxHeight: "20rem", overflow: "auto" }}>
                     <JsonView.String
                         render={({ children, ...reset }) => {
                             if (typeof (children) === "string" && children.match("\n")) {
@@ -223,22 +218,6 @@ const MessageMeta = (props: MessageMetaProps) => {
     </>);
 };
 
-const ChatQuery = ({ text, submitQuery }: ChatQueryInterface) => {
-    if (submitQuery === undefined) {
-        return (<Box>{text}</Box>);
-    }
-    return (
-        <Button variant="outlined" sx={{
-            color: theme => theme.palette.custom.highlight, // Golden Ochre (#D4A017)
-            borderColor: theme => theme.palette.custom.highlight,
-            m: 1
-        }}
-            size="small" onClick={(e: any) => { submitQuery(text); }}>
-            {text}
-        </Button>
-    );
-}
-
 const Message = (props: MessageProps) => {
     const { message, submitQuery, isFullWidth, sx, className } = props;
     const [expanded, setExpanded] = useState<boolean>(false);
@@ -323,14 +302,12 @@ const Message = (props: MessageProps) => {
 export type {
     MessageProps,
     MessageList,
-    ChatQueryInterface,
     MessageData,
     MessageRoles
 };
 
 export {
     Message,
-    ChatQuery,
     MessageMeta
 };
 
@@ -18,7 +18,7 @@ import {
 } from '@mui/icons-material';
 import { SxProps, Theme } from '@mui/material';
 
-import { ChatQuery } from './Message';
+import { ChatQuery } from './ChatQuery';
 import { MessageList, MessageData } from './Message';
 import { SetSnackType } from './Snack';
 import { Conversation } from './Conversation';
@@ -97,6 +97,12 @@ const ResumeBuilder: React.FC<ResumeBuilderProps> = ({
         if (messages === undefined || messages.length === 0) {
             return [];
         }
+        console.log("filterJobDescriptionMessages disabled")
+        if (messages.length > 1) {
+            setHasResume(true);
+        }
+
+        return messages;
 
         let reduced = messages.filter((m, i) => {
             const keep = (m.metadata?.origin || m.origin || "no origin") === 'job_description';
@@ -135,6 +141,11 @@ const ResumeBuilder: React.FC<ResumeBuilderProps> = ({
         if (messages === undefined || messages.length === 0) {
             return [];
         }
+        console.log("filterResumeMessages disabled")
+        if (messages.length > 3) {
+            setHasFacts(true);
+        }
+        return messages;
 
         let reduced = messages.filter((m, i) => {
             const keep = (m.metadata?.origin || m.origin || "no origin") === 'resume';
@@ -182,6 +193,9 @@ const ResumeBuilder: React.FC<ResumeBuilderProps> = ({
         if (messages === undefined || messages.length === 0) {
             return [];
         }
+        console.log("filterFactsMessages disabled")
+        return messages;
+
         // messages.forEach((m, i) => console.log(`filterFactsMessages: ${i + 1}:`, m))
 
         const reduced = messages.filter(m => {
@@ -240,8 +254,8 @@ const ResumeBuilder: React.FC<ResumeBuilderProps> = ({
         console.log('renderJobDescriptionView');
         const jobDescriptionQuestions = [
             <Box sx={{ display: "flex", flexDirection: small ? "column" : "row" }}>
-                <ChatQuery text="What are the key skills necessary for this position?" submitQuery={handleJobQuery} />
-                <ChatQuery text="How much should this position pay (accounting for inflation)?" submitQuery={handleJobQuery} />
+                <ChatQuery prompt="What are the key skills necessary for this position?" tunables={{ enable_tools: false }} submitQuery={handleJobQuery} />
+                <ChatQuery prompt="How much should this position pay (accounting for inflation)?" tunables={{ enable_tools: false }} submitQuery={handleJobQuery} />
             </Box>,
         ];
 
@@ -289,8 +303,8 @@ const ResumeBuilder: React.FC<ResumeBuilderProps> = ({
     const renderResumeView = useCallback((small: boolean) => {
         const resumeQuestions = [
             <Box sx={{ display: "flex", flexDirection: small ? "column" : "row" }}>
-                <ChatQuery text="Is this resume a good fit for the provided job description?" submitQuery={handleResumeQuery} />
-                <ChatQuery text="Provide a more concise resume." submitQuery={handleResumeQuery} />
+                <ChatQuery prompt="Is this resume a good fit for the provided job description?" tunables={{ enable_tools: false }} submitQuery={handleResumeQuery} />
+                <ChatQuery prompt="Provide a more concise resume." tunables={{ enable_tools: false }} submitQuery={handleResumeQuery} />
             </Box>,
         ];
 
@@ -298,9 +312,9 @@ const ResumeBuilder: React.FC<ResumeBuilderProps> = ({
         return <Conversation
             ref={resumeConversationRef}
             {...{
-                actionLabel: "Fact Check",
-                multiline: true,
                 type: "resume",
+                actionLabel: "Fact Check",
+                defaultQuery: "Fact check the resume.",
                 resetLabel: `job description${hasFacts ? ", resume, and fact check" : hasResume ? " and resume" : ""}`,
                 messageFilter: filterResumeMessages,
                 onResponse: resumeResponse,
@@ -319,12 +333,12 @@ const ResumeBuilder: React.FC<ResumeBuilderProps> = ({
                 prompt: "Ask a question about this job resume...",
                 resetLabel: `job description${hasFacts ? ", resume, and fact check" : hasResume ? " and resume" : ""}`,
                 messageFilter: filterResumeMessages,
-                defaultPrompts: resumeQuestions,
-                resetAction: resetResume,
                 onResponse: resumeResponse,
+                resetAction: resetResume,
                 sessionId,
                 connectionBase,
                 setSnack,
+                defaultPrompts: resumeQuestions,
             }}
         />
     }
@@ -336,7 +350,7 @@ const ResumeBuilder: React.FC<ResumeBuilderProps> = ({
     const renderFactCheckView = useCallback((small: boolean) => {
         const factsQuestions = [
             <Box sx={{ display: "flex", flexDirection: small ? "column" : "row" }}>
-                <ChatQuery text="Rewrite the resume to address any discrepancies." submitQuery={handleFactsQuery} />
+                <ChatQuery prompt="Rewrite the resume to address any discrepancies." tunables={{ enable_tools: false }} submitQuery={handleFactsQuery} />
             </Box>,
         ];
 
@@ -2,12 +2,12 @@ import React from 'react';
 import { MuiMarkdown } from 'mui-markdown';
 import { useTheme } from '@mui/material/styles';
 import { Link } from '@mui/material';
-import { ChatQuery } from './Message';
+import { ChatQuery, QueryOptions } from './ChatQuery';
 
 interface StyledMarkdownProps {
     className?: string,
     content: string,
-    submitQuery?: (query: string) => void,
+    submitQuery?: (prompt: string, tunables?: QueryOptions) => void,
     [key: string]: any, // For any additional props
 };
 
|
|||||||
options.overrides.ChatQuery = {
|
options.overrides.ChatQuery = {
|
||||||
component: ChatQuery,
|
component: ChatQuery,
|
||||||
props: {
|
props: {
|
||||||
submitQuery
|
submitQuery,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
@@ -158,7 +158,7 @@ const VectorVisualizer: React.FC<VectorVisualizerProps> = (props: VectorVisualiz
     useEffect(() => {
         if (!result || !result.embeddings) return;
         if (result.embeddings.length === 0) return;
-        console.log('Result:', result);
         const vectors: (number[])[] = [...result.embeddings];
         const documents = [...result.documents || []];
         const metadatas = [...result.metadatas || []];
src/server.py (325 lines changed)
@@ -18,6 +18,7 @@ import math
 import warnings
 from typing import Any
 from collections import deque
+from datetime import datetime
 
 from uuid import uuid4
 
@@ -53,6 +54,7 @@ from utils import (
     tools as Tools,
     Context, Conversation, Message,
     Agent,
+    Tunables,
     defines,
     logger,
 )
@@ -64,26 +66,8 @@ rags = [
     # { "name": "LKML", "enabled": False, "description": "Full associative data for entire LKML mailing list archive." },
 ]
 
-system_message = f"""
-Launched on {Tools.DateTime()}.
-
-When answering queries, follow these steps:
-
-- First analyze the query to determine if real-time information from the tools might be helpful
-- Even when <|context|> is provided, consider whether the tools would provide more current or comprehensive information
-- Use the provided tools whenever they would enhance your response, regardless of whether context is also available
-- When presenting weather forecasts, include relevant emojis immediately before the corresponding text. For example, for a sunny day, say \"☀️ Sunny\" or if the forecast says there will be \"rain showers, say \"🌧️ Rain showers\". Use this mapping for weather emojis: Sunny: ☀️, Cloudy: ☁️, Rainy: 🌧️, Snowy: ❄️
-- When both <|context|> and tool outputs are relevant, synthesize information from both sources to provide the most complete answer
-- Always prioritize the most up-to-date and relevant information, whether it comes from <|context|> or tools
-- If <|context|> and tool outputs contain conflicting information, prefer the tool outputs as they likely represent more current data
-- If there is information in the <|context|>, <|job_description|>, or <|context|> sections to enhance the answer, incorporate it seamlessly and refer to it as 'the latest information' or 'recent data' instead of mentioning '<|context|>' (etc.) or quoting it directly.
-- Avoid phrases like 'According to the <|context|>' or similar references to the <|context|>, <|job_description|>, or <|context|> tags.
-
-Always use tools and <|context|> when possible. Be concise, and never make up information. If you do not know the answer, say so.
-"""
-
 system_message_old = f"""
-Launched on {Tools.DateTime()}.
+Launched on {datetime.now().isoformat()}.
 
 When answering queries, follow these steps:
 
@@ -98,53 +82,9 @@ When answering queries, follow these steps:
 Always use tools and <|context|> when possible. Be concise, and never make up information. If you do not know the answer, say so.
 """.strip()
 
-system_generate_resume = f"""
-Launched on {Tools.DateTime()}.
-
-You are a professional resume writer. Your task is to write a concise, polished, and tailored resume for a specific job based only on the individual's <|context|>.
-
-When answering queries, follow these steps:
-
-- You must not invent or assume any inforation not explicitly present in the <|context|>.
-- Analyze the <|job_description|> to identify skills required for the job.
-- Use the <|job_description|> provided to guide the focus, tone, and relevant skills or experience to highlight from the <|context|>.
-- Identify and emphasize the experiences, achievements, and responsibilities from the <|context|> that best align with the <|job_description|>.
-- Only provide information from <|context|> items if it is relevant to the <|job_description|>.
-- Do not use the <|job_description|> skills unless listed in <|context|>.
-- Do not include any information unless it is provided in <|context|>.
-- Use the <|context|> to create a polished, professional resume.
-- Do not list any locations or mailing addresses in the resume.
-- If there is information in the <|context|>, <|job_description|>, <|context|>, or <|resume|> sections to enhance the answer, incorporate it seamlessly and refer to it using natural language instead of mentioning '<|job_description|>' (etc.) or quoting it directly.
-- Avoid phrases like 'According to the <|context|>' or similar references to the <|context|>, <|job_description|>, or <|context|> tags.
-- Ensure the langauge is clear, concise, and aligned with industry standards for professional resumes.
-
-Structure the resume professionally with the following sections where applicable:
-
-* Name: Use full name
-* Professional Summary: A 2-4 sentence overview tailored to the job.
-* Skills: A bullet list of key skills derived from the work history and relevant to the job.
-* Professional Experience: A detailed list of roles, achievements, and responsibilities from <|context|> that relate to the <|job_description|>.
-* Education: Include only if available in the work history.
-* Notes: Indicate the initial draft of the resume was generated using the Backstory application.
-
-""".strip()
-
-system_fact_check = f"""
-Launched on {Tools.DateTime()}.
-
-You are a professional resume fact checker. Your task is to identify any inaccuracies in the <|resume|> based on the individual's <|context|>.
-
-If there are inaccuracies, list them in a bullet point format.
-
-When answering queries, follow these steps:
-- You must not invent or assume any information not explicitly present in the <|context|>.
-- Analyze the <|resume|> to identify any discrepancies or inaccuracies based on the <|context|>.
-- If there is information in the <|context|>, <|job_description|>, <|context|>, or <|resume|> sections to enhance the answer, incorporate it seamlessly and refer to it using natural language instead of mentioning '<|job_description|>' (etc.) or quoting it directly.
-- Avoid phrases like 'According to the <|context|>' or similar references to the <|context|>, <|job_description|>, <|resume|>, or <|context|> tags.
-""".strip()
-
 system_fact_check_QA = f"""
-Launched on {Tools.DateTime()}.
+Launched on {datetime.now().isoformat()}.
 
 You are a professional resume fact checker.
 
@@ -153,18 +93,6 @@ You are provided with a <|resume|> which was generated by you, the <|context|> y
 Your task is to answer questions about the <|fact_check|> you generated based on the <|resume|> and <|context>.
 """
 
-system_job_description = f"""
-Launched on {Tools.DateTime()}.
-
-You are a hiring and job placing specialist. Your task is to answers about a job description.
-
-When answering queries, follow these steps:
-- Analyze the <|job_description|> to provide insights for the asked question.
-- If any financial information is requested, be sure to account for inflation.
-- If there is information in the <|context|>, <|job_description|>, <|context|>, or <|resume|> sections to enhance the answer, incorporate it seamlessly and refer to it using natural language instead of mentioning '<|job_description|>' (etc.) or quoting it directly.
-- Avoid phrases like 'According to the <|context|>' or similar references to the <|context|>, <|job_description|>, <|resume|>, or <|context|> tags.
-""".strip()
-
 def get_installed_ram():
     try:
         with open("/proc/meminfo", "r") as f:
@@ -440,27 +368,27 @@ class WebServer:
                 match reset_operation:
                     case "system_prompt":
                         logger.info(f"Resetting {reset_operation}")
-                        match agent_type:
-                            case "chat":
-                                prompt = system_message
-                            case "job_description":
-                                prompt = system_generate_resume
-                            case "resume":
-                                prompt = system_generate_resume
-                            case "fact_check":
-                                prompt = system_message
-                            case _:
-                                prompt = system_message
+                        # match agent_type:
+                        #     case "chat":
+                        #         prompt = system_message
+                        #     case "job_description":
+                        #         prompt = system_generate_resume
+                        #     case "resume":
+                        #         prompt = system_generate_resume
+                        #     case "fact_check":
+                        #         prompt = system_message
+                        #     case _:
+                        #         prompt = system_message
 
-                        agent.system_prompt = prompt
-                        response["system_prompt"] = { "system_prompt": prompt }
+                        # agent.system_prompt = prompt
+                        # response["system_prompt"] = { "system_prompt": prompt }
                     case "rags":
                         logger.info(f"Resetting {reset_operation}")
                         context.rags = rags.copy()
                         response["rags"] = context.rags
                     case "tools":
                         logger.info(f"Resetting {reset_operation}")
-                        context.tools = Tools.default_tools(Tools.tools)
+                        context.tools = Tools.enabled_tools(Tools.tools)
                         response["tools"] = context.tools
                     case "history":
                         reset_map = {
@@ -579,27 +507,48 @@ class WebServer:
         @self.app.post("/api/chat/{context_id}/{agent_type}")
         async def post_chat_endpoint(context_id: str, agent_type: str, request: Request):
             logger.info(f"{request.method} {request.url.path}")
+            if not is_valid_uuid(context_id):
+                logger.warning(f"Invalid context_id: {context_id}")
+                return JSONResponse({"error": "Invalid context_id"}, status_code=400)
+
             try:
-                if not is_valid_uuid(context_id):
-                    logger.warning(f"Invalid context_id: {context_id}")
-                    return JSONResponse({"error": "Invalid context_id"}, status_code=400)
                 context = self.upsert_context(context_id)
 
                 try:
-                    data = await request.json()
                     agent = context.get_agent(agent_type)
-                    if not agent and agent_type == "job_description":
-                        logger.info(f"Agent {agent_type} not found. Returning empty history.")
-                        # Create a new agent if it doesn't exist
-                        agent = context.get_or_create_agent("job_description", system_prompt=system_generate_resume, job_description=data["content"])
                 except Exception as e:
                     logger.info(f"Attempt to create agent type: {agent_type} failed", e)
                     return JSONResponse({ "error": f"{agent_type} is not recognized", "context": context.id }, status_code=404)
 
+                query = await request.json()
+                prompt = query["prompt"]
+                if not isinstance(prompt, str) or len(prompt) == 0:
+                    logger.info(f"Prompt is empty")
+                    return JSONResponse({"error": "Prompt can not be empty"}, status_code=400)
+                try:
+                    options = Tunables(**query["options"]) if "options" in query else None
+                except Exception as e:
+                    logger.info(f"Attempt to set tunables failed: {query['options']}.", e)
+                    return JSONResponse({"error": f"Invalid options: {query['options']}"}, status_code=400)
+
+                if not agent:
+                    # job_description is the only agent that is dynamically generated from a
+                    # REST API endpoint.
+                    # - 'chat' is created on context creation.
+                    # - 'resume' is created on actions by 'job_description'
+                    # - 'fact_check' is created on actions by 'fact_check'
+                    match agent_type:
+                        case "job_description":
+                            logger.info(f"Agent {agent_type} not found. Returning empty history.")
+                            agent = context.get_or_create_agent("job_description", job_description=prompt)
+                        case _:
+                            logger.info(f"Invalid agent creation sequence for {agent_type}. Returning error.")
+                            return JSONResponse({ "error": f"{agent_type} is not recognized", "context": context.id }, status_code=404)
+
                 # Create a custom generator that ensures flushing
                 async def flush_generator():
                     logging.info(f"Message starting. Streaming partial results.")
-                    async for message in self.generate_response(context=context, agent=agent, content=data["content"]):
+                    async for message in self.generate_response(context=context, agent=agent, prompt=prompt, options=options):
                         if message.status != "done":
                             result = {
                                 "status": message.status,
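
Note: the validation added above defines a small error contract: 400 for an empty prompt or options that Tunables(**options) rejects, 404 for an agent type that cannot be created on demand. A hedged TypeScript sketch of exercising it from the client (the handling branches are illustrative):

    const postChat = async (sessionId: string, agentType: string, prompt: string) => {
        const response = await fetch(`/api/chat/${sessionId}/${agentType}`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json', 'Accept': 'application/json' },
            body: JSON.stringify({ prompt, options: { enable_tools: false } }),
        });
        if (response.status === 400) {
            // Empty prompt, or options the server could not parse.
        } else if (response.status === 404) {
            // agent_type is not recognized and cannot be created dynamically.
        }
        return response;
    };
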
@@ -607,11 +556,15 @@ class WebServer:
                             }
                         else:
                             logging.info(f"Message complete. Providing full response.")
-                            result = message.model_dump(mode='json')
+                            try:
+                                result = message.model_dump(by_alias=True, mode='json')
+                            except Exception as e:
+                                result = { "status": "error", "response": e }
+                                exit(1)
+                        # Convert to JSON and add newline
                         result = json.dumps(result) + "\n"
                         message.network_packets += 1
                         message.network_bytes += len(result)
-                        # Convert to JSON and add newline
                         yield result
                         # Explicitly flush after each yield
                         await asyncio.sleep(0)  # Allow the event loop to process the write
@@ -653,7 +606,7 @@ class WebServer:
                 if not agent:
                     logger.info(f"Agent {agent_type} not found. Returning empty history.")
                     return JSONResponse({ "messages": [] })
-                logger.info(f"History for {agent_type} contains {len(agent.conversation.messages)} entries.")
+                logger.info(f"History for {agent_type} contains {len(agent.conversation)} entries.")
                 return agent.conversation
             except Exception as e:
                 logger.error(f"get_history error: {str(e)}")
@@ -735,7 +688,7 @@ class WebServer:
 
         # Serialize the data to JSON and write to file
         with open(file_path, "w") as f:
-            f.write(context.model_dump_json())
+            f.write(context.model_dump_json(by_alias=True))
 
         return context_id
 
@@ -800,13 +753,12 @@ class WebServer:
 
         if os.path.exists(defines.resume_doc):
             context.user_resume = open(defines.resume_doc, "r").read()
-        context.get_or_create_agent(
-            agent_type="chat",
-            system_prompt=system_message)
+        context.get_or_create_agent(agent_type="chat")
+        # system_prompt=system_message)
         # context.add_agent(Resume(system_prompt = system_generate_resume))
         # context.add_agent(JobDescription(system_prompt = system_job_description))
         # context.add_agent(FactCheck(system_prompt = system_fact_check))
-        context.tools = Tools.default_tools(Tools.tools)
+        context.tools = Tools.enabled_tools(Tools.tools)
         context.rags = rags.copy()
 
         logger.info(f"{context.id} created and added to contexts.")
|
|||||||
self.save_context(context.id)
|
self.save_context(context.id)
|
||||||
return context
|
return context
|
||||||
|
|
||||||
def get_optimal_ctx_size(self, context, messages, ctx_buffer = 4096):
|
|
||||||
ctx = round(context + len(str(messages)) * 3 / 4)
|
|
||||||
return max(defines.max_context, min(2048, ctx + ctx_buffer))
|
|
||||||
|
|
||||||
# %%
|
|
||||||
# async def handle_tool_calls(self, message):
|
|
||||||
# """
|
|
||||||
# Process tool calls and yield status updates along the way.
|
|
||||||
# The last yielded item will be a tuple containing (tool_result, tools_used).
|
|
||||||
# """
|
|
||||||
# tools_used = []
|
|
||||||
# all_responses = []
|
|
||||||
|
|
||||||
# for i, tool_call in enumerate(message["tool_calls"]):
|
|
||||||
# arguments = tool_call["function"]["arguments"]
|
|
||||||
# tool = tool_call["function"]["name"]
|
|
||||||
|
|
||||||
# # Yield status update before processing each tool
|
|
||||||
# yield {"status": "processing", "message": f"Processing tool {i+1}/{len(message['tool_calls'])}: {tool}..."}
|
|
||||||
|
|
||||||
# # Process the tool based on its type
|
|
||||||
# match tool:
|
|
||||||
# case "TickerValue":
|
|
||||||
# ticker = arguments.get("ticker")
|
|
||||||
# if not ticker:
|
|
||||||
# ret = None
|
|
||||||
# else:
|
|
||||||
# ret = Tools.TickerValue(ticker)
|
|
||||||
# tools_used.append({ "tool": f"{tool}({ticker})", "result": ret})
|
|
||||||
|
|
||||||
# case "AnalyzeSite":
|
|
||||||
# url = arguments.get("url")
|
|
||||||
# question = arguments.get("question", "what is the summary of this content?")
|
|
||||||
|
|
||||||
# # Additional status update for long-running operations
|
|
||||||
# yield {"status": "processing", "message": f"Retrieving and summarizing content from {url}..."}
|
|
||||||
# ret = await Tools.AnalyzeSite(llm=self.llm, model=self.model, url=url, question=question)
|
|
||||||
# tools_used.append({ "tool": f"{tool}('{url}', '{question}')", "result": ret })
|
|
||||||
|
|
||||||
# case "DateTime":
|
|
||||||
# tz = arguments.get("timezone")
|
|
||||||
# ret = Tools.DateTime(tz)
|
|
||||||
# tools_used.append({ "tool": f"{tool}('{tz}')", "result": ret })
|
|
||||||
|
|
||||||
# case "WeatherForecast":
|
|
||||||
# city = arguments.get("city")
|
|
||||||
# state = arguments.get("state")
|
|
||||||
|
|
||||||
# yield {"status": "processing", "message": f"Fetching weather data for {city}, {state}..."}
|
|
||||||
# ret = Tools.WeatherForecast(city, state)
|
|
||||||
# tools_used.append({ "tool": f"{tool}('{city}', '{state}')", "result": ret })
|
|
||||||
|
|
||||||
# case _:
|
|
||||||
# ret = None
|
|
||||||
|
|
||||||
# # Build response for this tool
|
|
||||||
# tool_response = {
|
|
||||||
# "role": "tool",
|
|
||||||
# "content": str(ret),
|
|
||||||
# "name": tool_call["function"]["name"]
|
|
||||||
# }
|
|
||||||
# all_responses.append(tool_response)
|
|
||||||
|
|
||||||
# # Yield the final result as the last item
|
|
||||||
# final_result = all_responses[0] if len(all_responses) == 1 else all_responses
|
|
||||||
# yield (final_result, tools_used)
|
|
||||||
|
|
||||||
def upsert_context(self, context_id = None) -> Context:
|
def upsert_context(self, context_id = None) -> Context:
|
||||||
"""
|
"""
|
||||||
Upsert a context based on the provided context_id.
|
Upsert a context based on the provided context_id.
|
||||||
@ -899,74 +784,34 @@ class WebServer:
|
|||||||
|
|
||||||
logger.info(f"Context {context_id} is not yet loaded.")
|
logger.info(f"Context {context_id} is not yet loaded.")
|
||||||
return self.load_or_create_context(context_id)
|
return self.load_or_create_context(context_id)
|
||||||
|
|
||||||
def generate_rag_results(self, context, content):
|
|
||||||
if not self.file_watcher:
|
|
||||||
raise Exception("File watcher not initialized")
|
|
||||||
|
|
||||||
results_found = False
|
async def generate_response(self, context : Context, agent : Agent, prompt : str, options: Tunables | None) -> AsyncGenerator[Message, None]:
|
||||||
|
|
||||||
for rag in context.rags:
|
|
||||||
if rag["enabled"] and rag["name"] == "JPK": # Only support JPK rag right now...
|
|
||||||
yield {"status": "processing", "message": f"Checking RAG context {rag['name']}..."}
|
|
||||||
chroma_results = self.file_watcher.find_similar(query=content, top_k=10)
|
|
||||||
if chroma_results:
|
|
||||||
results_found = True
|
|
||||||
chroma_embedding = np.array(chroma_results["query_embedding"]).flatten() # Ensure correct shape
|
|
||||||
logger.info(f"Chroma embedding shape: {chroma_embedding.shape}")
|
|
||||||
|
|
||||||
umap_2d = self.file_watcher.umap_model_2d.transform([chroma_embedding])[0].tolist()
|
|
||||||
logger.info(f"UMAP 2D output: {umap_2d}, length: {len(umap_2d)}") # Debug output
|
|
||||||
|
|
||||||
umap_3d = self.file_watcher.umap_model_3d.transform([chroma_embedding])[0].tolist()
|
|
||||||
logger.info(f"UMAP 3D output: {umap_3d}, length: {len(umap_3d)}") # Debug output
|
|
||||||
|
|
||||||
yield {
|
|
||||||
**chroma_results,
|
|
||||||
"name": rag["name"],
|
|
||||||
"umap_embedding_2d": umap_2d,
|
|
||||||
"umap_embedding_3d": umap_3d
|
|
||||||
}
|
|
||||||
|
|
||||||
if not results_found:
|
|
||||||
yield {"status": "complete", "message": "No RAG context found"}
|
|
||||||
yield {
|
|
||||||
"rag": None,
|
|
||||||
"documents": [],
|
|
||||||
"embeddings": [],
|
|
||||||
"umap_embedding_2d": [],
|
|
||||||
"umap_embedding_3d": []
|
|
||||||
}
|
|
||||||
else:
|
|
||||||
yield {"status": "complete", "message": "RAG processing complete"}
|
|
||||||
|
|
||||||
async def generate_response(self, context : Context, agent : Agent, content : str) -> AsyncGenerator[Message, None]:
|
|
||||||
if not self.file_watcher:
|
if not self.file_watcher:
|
||||||
raise Exception("File watcher not initialized")
|
raise Exception("File watcher not initialized")
|
||||||
|
|
||||||
agent_type = agent.get_agent_type()
|
agent_type = agent.get_agent_type()
|
||||||
logger.info(f"generate_response: type - {agent_type} prompt - {content}")
|
logger.info(f"generate_response: type - {agent_type}")
|
||||||
if agent_type == "chat":
|
message = Message(prompt=prompt, options=agent.tunables)
|
||||||
message = Message(prompt=content)
|
if options:
|
||||||
async for message in agent.prepare_message(message):
|
message.tunables = options
|
||||||
# logger.info(f"{agent_type}.prepare_message: {value.status} - {value.response}")
|
|
||||||
if message.status == "error":
|
async for message in agent.prepare_message(message):
|
||||||
yield message
|
# logger.info(f"{agent_type}.prepare_message: {value.status} - {value.response}")
|
||||||
return
|
if message.status == "error":
|
||||||
if message.status != "done":
|
yield message
|
||||||
yield message
|
return
|
||||||
async for message in agent.process_message(self.llm, self.model, message):
|
if message.status != "done":
|
||||||
if message.status == "error":
|
yield message
|
||||||
yield message
|
async for message in agent.process_message(self.llm, self.model, message):
|
||||||
return
|
if message.status == "error":
|
||||||
if message.status != "done":
|
yield message
|
||||||
yield message
|
return
|
||||||
logger.info(f"{agent_type}.process_message: {message.status} {f'...{message.response[-20:]}' if len(message.response) > 20 else message.response}")
|
if message.status != "done":
|
||||||
message.status = "done"
|
yield message
|
||||||
yield message
|
logger.info(f"{agent_type}.process_message: {message.status} {f'...{message.response[-20:]}' if len(message.response) > 20 else message.response}")
|
||||||
return
|
message.status = "done"
|
||||||
|
yield message
|
||||||
return
|
return
|
||||||
|
|
||||||
if self.processing:
|
if self.processing:
|
||||||
logger.info("TODO: Implement delay queing; busy for same agent, otherwise return queue size and estimated wait time")
|
logger.info("TODO: Implement delay queing; busy for same agent, otherwise return queue size and estimated wait time")
|
||||||
@@ -1124,7 +969,7 @@ Use the above information to respond to this prompt:
        stuffingMessage.response = "Job description stored to use in future queries."
        stuffingMessage.metadata["origin"] = "job_description"
        stuffingMessage.metadata["display"] = "hide"
-        conversation.add_message(stuffingMessage)
+        conversation.add(stuffingMessage)

        message.add_action("generate_resume")

@@ -1210,7 +1055,7 @@ Use the above <|resume|> and <|job_description|> to answer this query:
        stuffingMessage.metadata["display"] = "hide"
        stuffingMessage.actions = [ "fact_check" ]
        logger.info("TODO: Switch this to use actions to keep the UI from showing it")
-        conversation.add_message(stuffingMessage)
+        conversation.add(stuffingMessage)

        # For all future calls to job_description, use the system_job_description
        logger.info("TODO: Create a system_resume_QA prompt to use for the resume agent")
@@ -1226,7 +1071,7 @@ Use the above <|resume|> and <|job_description|> to answer this query:
        case _:
            raise Exception(f"Invalid chat agent_type: {agent_type}")

-    conversation.add_message(message)
+    conversation.add(message)
    # llm_history.append({"role": "user", "content": message.preamble + content})
    # user_history.append({"role": "user", "content": content, "origin": message.metadata["origin"]})
    # message.metadata["full_query"] = llm_history[-1]["content"]
@@ -5,13 +5,14 @@ import importlib
 from . import defines
 from . context import Context
 from . conversation import Conversation
-from . message import Message
+from . message import Message, Tunables
 from . rag import ChromaDBFileWatcher, start_file_watcher
 from . setup_logging import setup_logging
 from . agents import class_registry, AnyAgent, Agent, __all__ as agents_all

 __all__ = [
     'Agent',
+    'Tunables',
     'Context',
     'Conversation',
     'Message',
@@ -1,5 +1,5 @@
 from __future__ import annotations
-from typing import TypeAlias, Dict, Tuple
+from typing import TypeAlias, Dict, Tuple, Optional
 import importlib
 import pathlib
 import inspect
@@ -9,10 +9,9 @@ from .. setup_logging import setup_logging
 from .. import defines
 from . base import Agent


 logger = setup_logging(defines.logging_level)

-__all__ = [ "AnyAgent", "registry", "class_registry" ]
+__all__ = [ "AnyAgent", "Agent", "registry", "class_registry" ]

 # Type alias for Agent or any subclass
 AnyAgent: TypeAlias = Agent  # BaseModel covers Agent and subclasses
@@ -4,16 +4,15 @@ from typing import (
     Literal, get_args, List, AsyncGenerator, TYPE_CHECKING, Optional, ClassVar, Any,
     TypeAlias, Dict, Tuple
 )
-from abc import ABC
-from .. setup_logging import setup_logging
-from .. import defines
-from abc import ABC
-import logging
-from .. message import Message
-from .. tools import ( TickerValue, WeatherForecast, AnalyzeSite, DateTime, llm_tools ) # type: ignore -- dynamically added to __all__
 import json
 import time
 import inspect
+from abc import ABC

+from .. setup_logging import setup_logging
+from .. import defines
+from .. message import Message
+from .. tools import ( TickerValue, WeatherForecast, AnalyzeSite, DateTime, llm_tools ) # type: ignore -- dynamically added to __all__

 logger = setup_logging()

@@ -24,7 +23,12 @@ if TYPE_CHECKING:
     from .types import registry

 from .. conversation import Conversation
-from .. message import Message
+from .. message import Message, Tunables

+class LLMMessage(BaseModel):
+    role : str = Field(default="")
+    content : str = Field(default="")
+    tool_calls : Optional[List[Dict]] = Field(default={}, exclude=True)

 class Agent(BaseModel, ABC):
     """
@@ -35,15 +39,8 @@ class Agent(BaseModel, ABC):
     agent_type: Literal["base"] = "base"
     _agent_type: ClassVar[str] = agent_type # Add this for registration

-    # context_size is shared across all subclasses
-    _context_size: ClassVar[int] = int(defines.max_context * 0.5)
-    @property
-    def context_size(self) -> int:
-        return Agent._context_size
-
-    @context_size.setter
-    def context_size(self, value: int):
-        Agent._context_size = value
+    # Tunables (sets default for new Messages attached to this agent)
+    tunables: Tunables = Field(default_factory=Tunables)

     # Agent properties
     system_prompt: str # Mandatory
@@ -51,7 +48,15 @@ class Agent(BaseModel, ABC):
     context_tokens: int = 0
     context: Optional[Context] = Field(default=None, exclude=True) # Avoid circular reference, require as param, and prevent serialization

-    _content_seed: str = PrivateAttr(default="")
+    # context_size is shared across all subclasses
+    _context_size: ClassVar[int] = int(defines.max_context * 0.5)
+    @property
+    def context_size(self) -> int:
+        return Agent._context_size
+
+    @context_size.setter
+    def context_size(self, value: int):
+        Agent._context_size = value

     def set_optimal_context_size(self, llm: Any, model: str, prompt: str, ctx_buffer=2048) -> int:
         # # Get more accurate token count estimate using tiktoken or similar
@@ -114,18 +119,18 @@ class Agent(BaseModel, ABC):
         """
         Prepare message with context information in message.preamble
         """
-        logging.info(f"{self.agent_type} - {inspect.stack()[1].function}")
+        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")

         if not self.context:
             raise ValueError("Context is not set for this agent.")

         # Generate RAG content if enabled, based on the content
         rag_context = ""
-        if message.enable_rag:
+        if message.tunables.enable_rag and message.prompt:
             # Gather RAG results, yielding each result
             # as it becomes available
             for message in self.context.generate_rag_results(message):
-                logging.info(f"RAG: {message.status} - {message.response}")
+                logger.info(f"RAG: {message.status} - {message.response}")
                 if message.status == "error":
                     yield message
                     return
@@ -142,27 +147,16 @@ class Agent(BaseModel, ABC):
         if rag_context:
             message.preamble["context"] = rag_context

-        if self.context.user_resume:
+        if message.tunables.enable_context and self.context.user_resume:
             message.preamble["resume"] = self.context.user_resume

-        if message.preamble:
-            preamble_types = [f"<|{p}|>" for p in message.preamble.keys()]
-            preamble_types_AND = " and ".join(preamble_types)
-            preamble_types_OR = " or ".join(preamble_types)
-            message.preamble["rules"] = f"""\
-- Answer the question based on the information provided in the {preamble_types_AND} sections by incorporate it seamlessly and refer to it using natural language instead of mentioning {preamble_types_OR} or quoting it directly.
-- If there is no information in these sections, answer based on your knowledge, or use any available tools.
-- Avoid phrases like 'According to the {preamble_types[0]}' or similar references to the {preamble_types_OR}.
-"""
-            message.preamble["question"] = "Respond to:"

         message.system_prompt = self.system_prompt
         message.status = "done"
         yield message
         return

-    async def process_tool_calls(self, llm: Any, model: str, message: Message, tool_message: Any, messages: List[Any]) -> AsyncGenerator[Message, None]:
-        logging.info(f"{self.agent_type} - {inspect.stack()[1].function}")
+    async def process_tool_calls(self, llm: Any, model: str, message: Message, tool_message: Any, messages: List[LLMMessage]) -> AsyncGenerator[Message, None]:
+        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")

         if not self.context:
             raise ValueError("Context is not set for this agent.")
@@ -170,7 +164,6 @@ class Agent(BaseModel, ABC):
             raise ValueError("tools field not initialized")

         tool_metadata = message.metadata["tools"]
-        tool_metadata["messages"] = messages
         tool_metadata["tool_calls"] = []

         message.status = "tooling"
@@ -182,7 +175,7 @@ class Agent(BaseModel, ABC):
             # Yield status update before processing each tool
             message.response = f"Processing tool {i+1}/{len(tool_message.tool_calls)}: {tool}..."
             yield message
-            logging.info(f"LLM - {message.response}")
+            logger.info(f"LLM - {message.response}")

             # Process the tool based on its type
             match tool:
@@ -231,17 +224,17 @@ class Agent(BaseModel, ABC):
             yield message
             return

-        message_dict = {
-            "role": tool_message.get("role", "assistant"),
-            "content": tool_message.get("content", ""),
-            "tool_calls": [ {
+        message_dict = LLMMessage(
+            role=tool_message.get("role", "assistant"),
+            content=tool_message.get("content", ""),
+            tool_calls=[ {
                 "function": {
                     "name": tc["function"]["name"],
                     "arguments": tc["function"]["arguments"]
                 }
             } for tc in tool_message.tool_calls
             ]
-        }
+        )

         messages.append(message_dict)
         messages.extend(tool_metadata["tool_calls"])
@@ -262,7 +255,7 @@ class Agent(BaseModel, ABC):
                 # "temperature": 0.5,
             }
         ):
-            # logging.info(f"LLM::Tools: {'done' if response.done else 'processing'} - {response.message}")
+            # logger.info(f"LLM::Tools: {'done' if response.done else 'processing'} - {response.message}")
             message.status = "streaming"
             message.response += response.message.content
             if not response.done:
@@ -281,24 +274,25 @@ class Agent(BaseModel, ABC):
         return

     async def generate_llm_response(self, llm: Any, model: str, message: Message) -> AsyncGenerator[Message, None]:
-        logging.info(f"{self.agent_type} - {inspect.stack()[1].function}")
+        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")

         if not self.context:
             raise ValueError("Context is not set for this agent.")

-        messages = [ { "role": "system", "content": message.system_prompt } ]
+        # Create a pruned down message list based purely on the prompt and responses,
+        # discarding the full preamble generated by prepare_message
+        messages: List[LLMMessage] = [ LLMMessage(role="system", content=message.system_prompt) ]
         messages.extend([
-            item for m in self.conversation.messages
+            item for m in self.conversation
             for item in [
-                {"role": "user", "content": m.prompt.strip()},
-                {"role": "assistant", "content": m.response.strip()}
+                LLMMessage(role="user", content=m.prompt.strip()),
+                LLMMessage(role="assistant", content=m.response.strip())
             ]
         ])
-        messages.append({
-            "role": "user",
-            "content": message.context_prompt.strip(),
-        })
-        message.metadata["messages"] = messages
+        # Only the actual user query is provided with the full context message
+        messages.append(LLMMessage(role="user", content=message.context_prompt.strip()))
+        #message.metadata["messages"] = messages

         message.metadata["options"]={
             "seed": 8911,
             "num_ctx": self.context_size,
@@ -307,7 +301,7 @@ class Agent(BaseModel, ABC):

         message.metadata["timers"] = {}

-        use_tools = message.enable_tools and len(self.context.tools) > 0
+        use_tools = message.tunables.enable_tools and len(self.context.tools) > 0
         message.metadata["tools"] = {
             "available": llm_tools(self.context.tools),
             "used": False
@@ -319,37 +313,38 @@ class Agent(BaseModel, ABC):
             message.response = f"Performing tool analysis step 1/2..."
             yield message

-            logging.info("Checking for LLM tool usage")
+            logger.info("Checking for LLM tool usage")
             start_time = time.perf_counter()
             # Tools are enabled and available, so query the LLM with a short token target to see if it will
             # use the tools
             tool_metadata["messages"] = [{ "role": "system", "content": self.system_prompt}, {"role": "user", "content": message.prompt}]
             response = llm.chat(
                 model=model,
                 messages=tool_metadata["messages"],
                 tools=tool_metadata["available"],
                 options={
                     **message.metadata["options"],
                     #"num_predict": 1024, # "Low" token limit to cut off after tool call
                 },
                 stream=False # No need to stream the probe
             )
             end_time = time.perf_counter()
             message.metadata["timers"]["tool_check"] = f"{(end_time - start_time):.4f}"
             if not response.message.tool_calls:
-                logging.info("LLM indicates tools will not be used")
+                logger.info("LLM indicates tools will not be used")
                 # The LLM will not use tools, so disable use_tools so we can stream the full response
                 use_tools = False
+            else:
+                tool_metadata["attempted"] = response.message.tool_calls

         if use_tools:
-            logging.info("LLM indicates tools will be used")
+            logger.info("LLM indicates tools will be used")

             # Tools are enabled and available and the LLM indicated it will use them
-            tool_metadata["attempted"] = response.message.tool_calls
             message.response = f"Performing tool analysis step 2/2 (tool use suspected)..."
             yield message

-            logging.info(f"Performing LLM call with tools")
+            logger.info(f"Performing LLM call with tools")
             start_time = time.perf_counter()
             response = llm.chat(
                 model=model,
@@ -383,13 +378,15 @@ class Agent(BaseModel, ABC):
                 message.status = "done"
                 return

-            logging.info("LLM indicated tools will be used, and then they weren't")
+            logger.info("LLM indicated tools will be used, and then they weren't")
             message.response = response.message.content
             message.status = "done"
             yield message
             return

         # not use_tools
+        message.status = "thinking"
+        message.response = f"Generating response..."
         yield message
         # Reset the response for streaming
         message.response = ""
@@ -428,13 +425,13 @@ class Agent(BaseModel, ABC):
             return

     async def process_message(self, llm: Any, model: str, message:Message) -> AsyncGenerator[Message, None]:
-        logging.info(f"{self.agent_type} - {inspect.stack()[1].function}")
+        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")

         if not self.context:
             raise ValueError("Context is not set for this agent.")

         if self.context.processing:
-            logging.info("TODO: Implement delay queueing; busy for same agent, otherwise return queue size and estimated wait time")
+            logger.info("TODO: Implement delay queueing; busy for same agent, otherwise return queue size and estimated wait time")
             message.status = "error"
             message.response = "Busy processing another request."
             yield message
@@ -460,7 +457,7 @@ class Agent(BaseModel, ABC):
         yield message

         async for message in self.generate_llm_response(llm, model, message):
-            # logging.info(f"LLM: {message.status} - {f'...{message.response[-20:]}' if len(message.response) > 20 else message.response}")
+            # logger.info(f"LLM: {message.status} - {f'...{message.response[-20:]}' if len(message.response) > 20 else message.response}")
             if message.status == "error":
                 yield message
                 self.context.processing = False
@@ -469,7 +466,7 @@ class Agent(BaseModel, ABC):

         # Done processing, add message to conversation
         message.status = "done"
-        self.conversation.add_message(message)
+        self.conversation.add(message)
         self.context.processing = False
         return
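The larger change in this file is that `generate_llm_response` now builds its chat history from typed `LLMMessage` objects instead of bare dicts, pruned to prompt/response pairs rather than full preambles. A minimal standalone sketch of that construction; whether the downstream client wants the models serialized back to dicts (via pydantic's `model_dump`) is an assumption shown for illustration only:

```python
from typing import Dict, List, Optional
from pydantic import BaseModel, Field

class LLMMessage(BaseModel):
    role: str = Field(default="")
    content: str = Field(default="")
    tool_calls: Optional[List[Dict]] = Field(default=None, exclude=True)

# Pruned history: system prompt, prior prompt/response pairs, then only the
# current query carries the full context_prompt (preamble + prompt).
history = [("Hi", "Hello!"), ("What changed?", "The tunables model.")]
messages: List[LLMMessage] = [LLMMessage(role="system", content="system prompt")]
for prompt, response in history:
    messages.append(LLMMessage(role="user", content=prompt.strip()))
    messages.append(LLMMessage(role="assistant", content=response.strip()))
messages.append(LLMMessage(role="user", content="<|context|>...\nRespond to: ..."))

# If the LLM client expects plain dicts, the typed list serializes cleanly:
payload = [m.model_dump(exclude_none=True) for m in messages]
```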
@@ -1,9 +1,30 @@
 from __future__ import annotations
-from typing import Literal, AsyncGenerator, ClassVar, Optional
-import logging
+from typing import Literal, AsyncGenerator, ClassVar, Optional, Any
+from datetime import datetime
+import inspect

 from . base import Agent, registry
 from .. message import Message
-import inspect
+from .. setup_logging import setup_logging
+logger = setup_logging()

+system_message = f"""
+Launched on {datetime.now().isoformat()}.
+
+When answering queries, follow these steps:
+
+- First analyze the query to determine if real-time information from the tools might be helpful
+- Even when <|context|> or <|resume|> is provided, consider whether the tools would provide more current or comprehensive information
+- Use the provided tools whenever they would enhance your response, regardless of whether context is also available
+- When presenting weather forecasts, include relevant emojis immediately before the corresponding text. For example, for a sunny day, say \"☀️ Sunny\" or if the forecast says there will be \"rain showers\", say \"🌧️ Rain showers\". Use this mapping for weather emojis: Sunny: ☀️, Cloudy: ☁️, Rainy: 🌧️, Snowy: ❄️
+- When any combination of <|context|>, <|resume|> and tool outputs are relevant, synthesize information from all sources to provide the most complete answer
+- Always prioritize the most up-to-date and relevant information, whether it comes from <|context|>, <|resume|> or tools
+- If <|context|> and tool outputs contain conflicting information, prefer the tool outputs as they likely represent more current data
+- If there is information in the <|context|> or <|resume|> sections to enhance the answer, incorporate it seamlessly and refer to it as 'the latest information' or 'recent data' instead of mentioning '<|context|>' (etc.) or quoting it directly.
+- Avoid phrases like 'According to the <|context|>' or similar references to the <|context|> or <|resume|>.
+
+Always use tools, <|resume|>, and <|context|> when possible. Be concise, and never make up information. If you do not know the answer, say so.
+"""

 class Chat(Agent):
     """
@@ -12,5 +33,27 @@ class Chat(Agent):
     agent_type: Literal["chat"] = "chat" # type: ignore
     _agent_type: ClassVar[str] = agent_type # Add this for registration

+    system_prompt: str = system_message
+
+    async def prepare_message(self, message:Message) -> AsyncGenerator[Message, None]:
+        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
+        if not self.context:
+            raise ValueError("Context is not set for this agent.")
+
+        async for message in super().prepare_message(message):
+            if message.status != "done":
+                yield message
+
+        if message.preamble:
+            preamble_types = [f"<|{p}|>" for p in message.preamble.keys()]
+            preamble_types_AND = " and ".join(preamble_types)
+            preamble_types_OR = " or ".join(preamble_types)
+            message.preamble["rules"] = f"""\
+- Answer the question based on the information provided in the {preamble_types_AND} sections by incorporating it seamlessly and referring to it using natural language instead of mentioning {preamble_types_OR} or quoting it directly.
+- If there is no information in these sections, answer based on your knowledge, or use any available tools.
+- Avoid phrases like 'According to the {preamble_types[0]}' or similar references to the {preamble_types_OR}.
+"""
+            message.preamble["question"] = "Respond to:"

 # Register the base agent
 registry.register(Chat._agent_type, Chat)
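To make the rules construction in `Chat.prepare_message` concrete: with preamble keys `context` and `resume`, the joined strings come out as below (a standalone rerun of the same logic; the placeholder values are illustrative):

```python
preamble = {"context": "<rag results>", "resume": "<resume text>"}

preamble_types = [f"<|{p}|>" for p in preamble.keys()]
preamble_types_AND = " and ".join(preamble_types)  # '<|context|> and <|resume|>'
preamble_types_OR = " or ".join(preamble_types)    # '<|context|> or <|resume|>'

rules = f"""\
- Answer the question based on the information provided in the {preamble_types_AND} sections.
- Avoid phrases like 'According to the {preamble_types[0]}' or similar references to the {preamble_types_OR}.
"""
print(rules)
```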
@@ -1,12 +1,32 @@
+from __future__ import annotations
 from pydantic import model_validator # type: ignore
-from typing import Literal, ClassVar, Optional
-from .base import Agent, registry
+from typing import Literal, ClassVar, Optional, Any, AsyncGenerator, List # NOTE: You must import Optional for late binding to work
+from datetime import datetime
+import inspect
+
+from . base import Agent, registry
+from .. conversation import Conversation
+from .. message import Message
+from .. setup_logging import setup_logging
+logger = setup_logging()

+system_fact_check = f"""
+Launched on {datetime.now().isoformat()}.
+
+You are a professional resume fact checker. Your task is to answer any questions about items identified in the <|discrepancies|>.
+The <|discrepancies|> indicate inaccuracies or unsupported claims in the <|generated-resume|> based on content from the <|resume|> and <|context|>.
+
+When answering queries, follow these steps:
+- If there is information in the <|context|> or <|resume|> sections to enhance the answer, incorporate it seamlessly and refer to it using natural language instead of mentioning '<|context|>' (etc.) or quoting it directly.
+- Avoid phrases like 'According to the <|context|>' or similar references to the <|context|>, <|generated-resume|>, or <|resume|> tags.
+""".strip()

 class FactCheck(Agent):
     agent_type: Literal["fact_check"] = "fact_check" # type: ignore
     _agent_type: ClassVar[str] = agent_type # Add this for registration

-    facts: str = ""
+    system_prompt: str = system_fact_check
+    facts: str

     @model_validator(mode="after")
     def validate_facts(self):
@@ -14,5 +34,36 @@ class FactCheck(Agent):
             raise ValueError("Facts cannot be empty")
         return self

+    async def prepare_message(self, message:Message) -> AsyncGenerator[Message, None]:
+        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
+        if not self.context:
+            raise ValueError("Context is not set for this agent.")
+
+        resume_agent = self.context.get_agent("resume")
+        if not resume_agent:
+            raise ValueError("resume agent does not exist")
+
+        message.enable_tools = False
+
+        async for message in super().prepare_message(message):
+            if message.status != "done":
+                yield message
+
+        message.preamble["generated-resume"] = resume_agent.resume
+        message.preamble["discrepancies"] = self.facts
+
+        preamble_types = [f"<|{p}|>" for p in message.preamble.keys()]
+        preamble_types_AND = " and ".join(preamble_types)
+        preamble_types_OR = " or ".join(preamble_types)
+        message.preamble["rules"] = f"""\
+- Answer the question based on the information provided in the {preamble_types_AND} sections by incorporating it seamlessly and referring to it using natural language instead of mentioning {preamble_types_OR} or quoting it directly.
+- If there is no information in these sections, answer based on your knowledge, or use any available tools.
+- Avoid phrases like 'According to the {preamble_types[0]}' or similar references to the {preamble_types_OR}.
+"""
+        message.preamble["question"] = "Respond to:"
+
+        yield message
+        return

 # Register the base agent
 registry.register(FactCheck._agent_type, FactCheck)
@@ -1,15 +1,63 @@
+from __future__ import annotations
 from pydantic import model_validator # type: ignore
-from typing import Literal, ClassVar, Optional
-from .base import Agent, registry
+from typing import Literal, ClassVar, Optional, Any, AsyncGenerator, List # NOTE: You must import Optional for late binding to work
+from datetime import datetime
+import inspect
+
+from . base import Agent, registry
 from .. conversation import Conversation
 from .. message import Message
-from abc import ABC
+from .. setup_logging import setup_logging
+logger = setup_logging()

+system_generate_resume = f"""
+Launched on {datetime.now().isoformat()}.
+
+You are a professional resume writer. Your task is to write a concise, polished, and tailored resume for a specific job based only on the individual's <|context|>.
+
+When answering queries, follow these steps:
+
+- You must not invent or assume any information not explicitly present in the <|context|>.
+- Analyze the <|job_description|> to identify skills required for the job.
+- Use the <|job_description|> provided to guide the focus, tone, and relevant skills or experience to highlight from the <|context|>.
+- Identify and emphasize the experiences, achievements, and responsibilities from the <|context|> that best align with the <|job_description|>.
+- Only provide information from <|context|> items if it is relevant to the <|job_description|>.
+- Do not use the <|job_description|> skills unless listed in <|context|>.
+- Do not include any information unless it is provided in <|context|>.
+- Use the <|context|> to create a polished, professional resume.
+- Do not list any locations or mailing addresses in the resume.
+- If there is information in the <|context|>, <|job_description|>, or <|resume|> sections to enhance the answer, incorporate it seamlessly and refer to it using natural language instead of mentioning '<|job_description|>' (etc.) or quoting it directly.
+- Avoid phrases like 'According to the <|context|>' or similar references to the <|context|> or <|job_description|> tags.
+- Ensure the language is clear, concise, and aligned with industry standards for professional resumes.
+
+Structure the resume professionally with the following sections where applicable:
+
+* Name: Use full name
+* Professional Summary: A 2-4 sentence overview tailored to the job.
+* Skills: A bullet list of key skills derived from the work history and relevant to the job.
+* Professional Experience: A detailed list of roles, achievements, and responsibilities from <|context|> that relate to the <|job_description|>.
+* Education: Include only if available in the work history.
+* Notes: Indicate the initial draft of the resume was generated using the Backstory application.
+""".strip()
+
+system_job_description = f"""
+Launched on {datetime.now().isoformat()}.
+
+You are a hiring and job placing specialist. Your task is to answer questions about a job description.
+
+When answering queries, follow these steps:
+- Analyze the <|job_description|> to provide insights for the asked question.
+- If any financial information is requested, be sure to account for inflation.
+- If there is information in the <|context|>, <|job_description|>, or <|resume|> sections to enhance the answer, incorporate it seamlessly and refer to it using natural language instead of mentioning '<|job_description|>' (etc.) or quoting it directly.
+- Avoid phrases like 'According to the <|context|>' or similar references to the <|context|>, <|job_description|>, or <|resume|> tags.
+""".strip()

 class JobDescription(Agent):
     agent_type: Literal["job_description"] = "job_description" # type: ignore
     _agent_type: ClassVar[str] = agent_type # Add this for registration

-    job_description: str = ""
+    system_prompt: str = system_generate_resume
+    job_description: str

     @model_validator(mode="after")
     def validate_job_description(self):
@@ -17,5 +65,63 @@ class JobDescription(Agent):
             raise ValueError("Job description cannot be empty")
         return self

+    async def prepare_message(self, message:Message) -> AsyncGenerator[Message, None]:
+        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
+        if not self.context:
+            raise ValueError("Context is not set for this agent.")
+
+        async for message in super().prepare_message(message):
+            if message.status != "done":
+                yield message
+
+        # Always add the job description, user resume, and question
+        message.preamble["job_description"] = self.job_description
+        message.preamble["resume"] = self.context.user_resume
+
+        preamble_types = [f"<|{p}|>" for p in message.preamble.keys()]
+        preamble_types_AND = " and ".join(preamble_types)
+        preamble_types_OR = " or ".join(preamble_types)
+        message.preamble["rules"] = f"""\
+- Answer the question based on the information provided in the {preamble_types_AND} sections by incorporating it seamlessly and referring to it using natural language instead of mentioning {preamble_types_OR} or quoting it directly.
+- If there is no information in these sections, answer based on your knowledge, or use any available tools.
+- Avoid phrases like 'According to the {preamble_types[0]}' or similar references to the {preamble_types_OR}.
+"""
+
+        resume_agent = self.context.get_agent(agent_type="resume")
+        if resume_agent:
+            message.preamble["question"] = "Respond to:"
+        else:
+            message.preamble["question"] = "Generate a resume given the <|resume|> and <|job_description|>."
+
+        yield message
+        return
+
+    async def process_message(self, llm: Any, model: str, message:Message) -> AsyncGenerator[Message, None]:
+        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
+        if not self.context:
+            raise ValueError("Context is not set for this agent.")
+
+        async for message in super().process_message(llm, model, message):
+            if message.status != "done":
+                yield message
+
+        resume_agent = self.context.get_agent(agent_type="resume")
+        if not resume_agent:
+            # Switch agent from "Create Resume from Job Description" mode
+            # to "Answer Questions about Job Description"
+            self.system_prompt = system_job_description
+
+            # Instantiate the "resume" agent, and seed (or reset) its conversation
+            # with this message.
+            resume_agent = self.context.get_or_create_agent(agent_type="resume", resume=message.response)
+            first_resume_message = message.copy()
+            first_resume_message.prompt = "Generate a resume for the job description."
+            resume_agent.conversation.add(first_resume_message)
+            message.response = "Resume generated."
+
+        # Return the final message
+        yield message
+        return

 # Register the base agent
 registry.register(JobDescription._agent_type, JobDescription)
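The `process_message` override above is where agents chain: the first reply from `JobDescription` is the generated resume, which seeds a new `resume` agent before `JobDescription` flips its own system prompt into Q&A mode. A sketch of that one-time hand-off, using hypothetical stand-ins for `Context.get_agent`/`get_or_create_agent` (their real implementations live in `context.py`, which this diff only partially shows):

```python
# Hypothetical MiniContext illustrating the hand-off pattern; the real Context
# class is a pydantic model holding registered Agent subclasses.
class MiniContext:
    def __init__(self):
        self._agents = {}

    def get_agent(self, agent_type: str):
        return self._agents.get(agent_type)

    def get_or_create_agent(self, agent_type: str, **kwargs):
        if agent_type not in self._agents:
            self._agents[agent_type] = {"agent_type": agent_type, **kwargs}
        return self._agents[agent_type]

ctx = MiniContext()

# First pass: no "resume" agent exists, so the generated response seeds one and
# the JobDescription agent would switch to its Q&A system prompt.
if ctx.get_agent("resume") is None:
    ctx.get_or_create_agent("resume", resume="(generated resume text)")

# Subsequent passes find the agent and skip the hand-off.
assert ctx.get_agent("resume")["resume"] == "(generated resume text)"
```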
@@ -1,12 +1,47 @@
+from __future__ import annotations
 from pydantic import model_validator # type: ignore
-from typing import Literal, Optional, ClassVar
-from .base import Agent, registry
+from typing import Literal, ClassVar, Optional, Any, AsyncGenerator, List # NOTE: You must import Optional for late binding to work
+from datetime import datetime
+import inspect
+
+from . base import Agent, registry
+from .. message import Message
+from .. setup_logging import setup_logging
+logger = setup_logging()

+system_fact_check = f"""
+Launched on {datetime.now().isoformat()}.
+
+You are a professional resume fact checker. Your task is to identify any inaccuracies in the <|generated-resume|> based on the individual's <|context|> and <|resume|>.
+
+If there are inaccuracies, list them in a bullet point format.
+
+When answering queries, follow these steps:
+- Analyze the <|generated-resume|> to identify any discrepancies or inaccuracies which are not supported by the <|context|> and <|resume|>.
+- If there is information in the <|context|> or <|resume|> sections to enhance the answer, incorporate it seamlessly and refer to it using natural language instead of mentioning '<|context|>' (etc.) or quoting it directly.
+- Avoid phrases like 'According to the <|context|>' or similar references to the <|context|>, <|generated-resume|>, or <|resume|> tags.
+
+Do not generate a revised resume.
+""".strip()
+
+system_resume = f"""
+Launched on {datetime.now().isoformat()}.
+
+You are a hiring and job placing specialist. Your task is to answer questions about a resume and work history as it relates to a potential job.
+
+When answering queries, follow these steps:
+- Analyze the <|job_description|> and <|generated-resume|> to provide insights for the asked question.
+- If any financial information is requested, be sure to account for inflation.
+- If there is information in the <|context|>, <|job_description|>, <|generated-resume|>, or <|resume|> sections to enhance the answer, incorporate it seamlessly and refer to it using natural language instead of mentioning '<|job_description|>' (etc.) or quoting it directly.
+- Avoid phrases like 'According to the <|context|>' or similar references to the <|context|>, <|job_description|>, <|resume|>, or <|generated-resume|> tags.
+""".strip()

 class Resume(Agent):
     agent_type: Literal["resume"] = "resume" # type: ignore
     _agent_type: ClassVar[str] = agent_type # Add this for registration

-    resume: str = ""
+    system_prompt: str = system_fact_check
+    resume: str

     @model_validator(mode="after")
     def validate_resume(self):
@@ -14,13 +49,65 @@ class Resume(Agent):
             raise ValueError("Resume content cannot be empty")
         return self

-    def get_resume(self) -> str:
-        """Get the resume content."""
-        return self.resume
+    async def prepare_message(self, message:Message) -> AsyncGenerator[Message, None]:
+        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
+        if not self.context:
+            raise ValueError("Context is not set for this agent.")
+
+        async for message in super().prepare_message(message):
+            if message.status != "done":
+                yield message

-    def set_resume(self, resume: str) -> None:
-        """Set the resume content."""
-        self.resume = resume
+        message.preamble["generated-resume"] = self.resume
+        job_description_agent = self.context.get_agent("job_description")
+        if not job_description_agent:
+            raise ValueError("job_description agent does not exist")
+
+        message.preamble["job_description"] = job_description_agent.job_description
+
+        preamble_types = [f"<|{p}|>" for p in message.preamble.keys()]
+        preamble_types_AND = " and ".join(preamble_types)
+        preamble_types_OR = " or ".join(preamble_types)
+        message.preamble["rules"] = f"""\
+- Answer the question based on the information provided in the {preamble_types_AND} sections by incorporating it seamlessly and referring to it using natural language instead of mentioning {preamble_types_OR} or quoting it directly.
+- If there is no information in these sections, answer based on your knowledge, or use any available tools.
+- Avoid phrases like 'According to the {preamble_types[0]}' or similar references to the {preamble_types_OR}.
+"""
+        fact_check_agent = self.context.get_agent(agent_type="fact_check")
+        if fact_check_agent:
+            message.preamble["question"] = "Respond to:"
+        else:
+            message.preamble["question"] = f"Fact check the <|generated-resume|> based on the <|resume|>{' and <|context|>' if 'context' in message.preamble else ''}."
+
+        yield message
+        return
+
+    async def process_message(self, llm: Any, model: str, message:Message) -> AsyncGenerator[Message, None]:
+        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
+        if not self.context:
+            raise ValueError("Context is not set for this agent.")
+
+        async for message in super().process_message(llm, model, message):
+            if message.status != "done":
+                yield message
+
+        fact_check_agent = self.context.get_agent(agent_type="fact_check")
+        if not fact_check_agent:
+            # Switch agent from "Fact Check Generated Resume" mode
+            # to "Answer Questions about Generated Resume"
+            self.system_prompt = system_resume
+
+            # Instantiate the "fact_check" agent, and seed (or reset) its conversation
+            # with this message.
+            fact_check_agent = self.context.get_or_create_agent(agent_type="fact_check", facts=message.response)
+            first_fact_check_message = message.copy()
+            first_fact_check_message.prompt = "Fact check the generated resume."
+            fact_check_agent.conversation.add(first_fact_check_message)
+            message.response = "Resume fact checked."
+
+        # Return the final message
+        yield message
+        return

 # Register the base agent
 registry.register(Resume._agent_type, Resume)
@@ -6,13 +6,12 @@ from typing_extensions import Annotated, Union
 import numpy as np # type: ignore
 import logging
 from uuid import uuid4
-import re

-from .message import Message
-from .rag import ChromaDBFileWatcher
+from . message import Message, Tunables
+from . rag import ChromaDBFileWatcher
 from . import defines
 from . import tools as Tools
-from .agents import AnyAgent
+from . agents import AnyAgent

 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -30,7 +29,7 @@ class Context(BaseModel):
     user_resume: Optional[str] = None
     user_job_description: Optional[str] = None
     user_facts: Optional[str] = None
-    tools: List[dict] = Tools.default_tools(Tools.tools)
+    tools: List[dict] = Tools.enabled_tools(Tools.tools)
     rags: List[dict] = []
     message_history_length: int = 5
     # Class managed fields
@@ -1,22 +1,41 @@
-from pydantic import BaseModel
+from pydantic import BaseModel, Field, PrivateAttr # type: ignore
 from typing import List
 from .message import Message

 class Conversation(BaseModel):
-    messages: List[Message] = []
+    Conversation_messages: List[Message] = Field(default=[], alias="messages")

-    def add_message(self, message: Message | List[Message]) -> None:
+    def __len__(self):
+        return len(self.Conversation_messages)
+
+    def __iter__(self):
+        return iter(self.Conversation_messages)
+
+    def reset(self):
+        self.Conversation_messages = []
+
+    @property
+    def messages(self):
+        """Prevent direct access to the internal message list."""
+        raise AttributeError("Cannot directly get messages. Use Conversation.add() or .reset()")
+
+    @messages.setter
+    def messages(self, value):
+        """Control how messages can be set, or prevent setting altogether."""
+        raise AttributeError("Cannot directly set messages. Use Conversation.add() or .reset()")
+
+    def add(self, message: Message | List[Message]) -> None:
         """Add a Message(s) to the conversation."""
         if isinstance(message, Message):
-            self.messages.append(message)
+            self.Conversation_messages.append(message)
         else:
-            self.messages.extend(message)
+            self.Conversation_messages.extend(message)

     def get_summary(self) -> str:
         """Return a summary of the conversation."""
-        if not self.messages:
+        if not self.Conversation_messages:
             return "Conversation is empty."
         summary = f"Conversation:\n"
-        for i, message in enumerate(self.messages, 1):
+        for i, message in enumerate(self.Conversation_messages, 1):
             summary += f"\nMessage {i}:\n{message.get_summary()}\n"
         return summary
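The rewrite above deliberately hides the message list behind the aliased `Conversation_messages` field: iteration and `len()` still work through `__iter__`/`__len__`, but reading or assigning `.messages` now raises. A short usage sketch, assuming the `Conversation` and `Message` classes from this commit are in scope:

```python
# Assumes Conversation and Message from this commit are importable in scope.
conversation = Conversation()
conversation.add(Message(prompt="Hello"))                         # single message
conversation.add([Message(prompt="One"), Message(prompt="Two")])  # or a list

print(len(conversation))        # 3, via __len__
for message in conversation:    # via __iter__
    print(message.prompt)

try:
    conversation.messages       # property raises instead of exposing the list
except AttributeError as err:
    print(err)                  # Cannot directly get messages. Use Conversation.add() or .reset()

conversation.reset()            # empties the internal list
assert len(conversation) == 0
```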
@@ -1,14 +1,18 @@
-from pydantic import BaseModel # type: ignore
+from pydantic import BaseModel, Field # type: ignore
 from typing import Dict, List, Optional, Any
 from datetime import datetime, timezone

+class Tunables(BaseModel):
+    enable_rag : bool = Field(default=True)      # Enable RAG collection chromadb matching
+    enable_tools : bool = Field(default=True)    # Enable LLM to use tools
+    enable_context : bool = Field(default=True)  # Add <|context|> field to message
+
 class Message(BaseModel):
     # Required
     prompt: str # Query to be answered

     # Tunables
-    enable_rag: bool = True
-    enable_tools: bool = True
+    tunables: Tunables = Field(default_factory=Tunables)

     # Generated while processing message
     status: str = "" # Status of the message
@@ -16,14 +20,14 @@ class Message(BaseModel):
     system_prompt: str = "" # System prompt provided to the LLM
     context_prompt: str = "" # Full content of the message (preamble + prompt)
     response: str = "" # LLM response to the preamble + query
-    metadata: dict[str, Any] = {
-        "rag": List[dict[str, Any]],
+    metadata: Dict[str, Any] = Field(default_factory=lambda: {
+        "rag": [],
         "eval_count": 0,
         "eval_duration": 0,
         "prompt_eval_count": 0,
         "prompt_eval_duration": 0,
         "context_size": 0,
-    }
+    })
     network_packets: int = 0 # Total number of streaming packets
     network_bytes: int = 0 # Total bytes sent while streaming packets
     actions: List[str] = [] # Other session modifying actions performed while processing the message
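With `Tunables` attached to every `Message`, the old `enable_rag`/`enable_tools` booleans become a single overridable bundle. A minimal construction sketch, assuming the two models above are in scope; the prompt strings are illustrative:

```python
# Default: all tunables enabled.
message = Message(prompt="What roles has the candidate held?")
assert message.tunables.enable_rag is True

# Per-message override, replacing the old Message.enable_tools flag:
message = Message(
    prompt="Summarize the job description.",
    tunables=Tunables(enable_tools=False),
)
assert message.tunables.enable_tools is False
assert message.tunables.enable_context is True  # other defaults untouched
```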