Added more interactive view of what the LLM did

This commit is contained in:
James Ketr 2025-05-09 17:49:51 -07:00
parent 695bf5f58c
commit 0216515492
10 changed files with 311 additions and 253 deletions

View File

@ -24,6 +24,7 @@
"@types/react": "^19.0.12",
"@types/react-dom": "^19.0.4",
"@uiw/react-json-view": "^2.0.0-alpha.31",
"jsonrepair": "^3.12.0",
"mermaid": "^11.6.0",
"mui-markdown": "^2.0.1",
"prism-react-renderer": "^2.4.1",
@ -14170,6 +14171,14 @@
"node": ">=0.10.0"
}
},
"node_modules/jsonrepair": {
"version": "3.12.0",
"resolved": "https://registry.npmjs.org/jsonrepair/-/jsonrepair-3.12.0.tgz",
"integrity": "sha512-SWfjz8SuQ0wZjwsxtSJ3Zy8vvLg6aO/kxcp9TWNPGwJKgTZVfhNEQBMk/vPOpYCDFWRxD6QWuI6IHR1t615f0w==",
"bin": {
"jsonrepair": "bin/cli.js"
}
},
"node_modules/jsx-ast-utils": {
"version": "3.3.5",
"resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz",

View File

@ -19,6 +19,7 @@
"@types/react": "^19.0.12",
"@types/react-dom": "^19.0.4",
"@uiw/react-json-view": "^2.0.0-alpha.31",
"jsonrepair": "^3.12.0",
"mermaid": "^11.6.0",
"mui-markdown": "^2.0.1",
"prism-react-renderer": "^2.4.1",

View File

@ -19,11 +19,12 @@ interface ChatBubbleProps {
className?: string;
title?: string;
expanded?: boolean;
expandable?: boolean;
onExpand?: () => void;
}
function ChatBubble(props: ChatBubbleProps) {
const { role, children, sx, className, title, onExpand }: ChatBubbleProps = props;
const { role, children, sx, className, title, onExpand, expandable }: ChatBubbleProps = props;
const [expanded, setExpanded] = useState<boolean>((props.expanded === undefined) ? true : props.expanded);
const theme = useTheme();
@ -48,15 +49,7 @@ function ChatBubble(props: ChatBubbleProps) {
}
}
const styles = {
'user': {
...defaultStyle,
backgroundColor: theme.palette.background.default, // Warm Gray (#D3CDBF)
border: `1px solid ${theme.palette.custom.highlight}`, // Golden Ochre (#D4A017)
borderRadius: `${defaultRadius} ${defaultRadius} 0 ${defaultRadius}`, // Rounded, flat bottom-right for user
alignSelf: 'flex-end', // Right-aligned for user
color: theme.palette.primary.main, // Midnight Blue (#1A2536) for text
},
const styles: any = {
'assistant': {
...defaultStyle,
backgroundColor: theme.palette.primary.main, // Midnight Blue (#1A2536)
@ -64,52 +57,6 @@ function ChatBubble(props: ChatBubbleProps) {
borderRadius: `${defaultRadius} ${defaultRadius} ${defaultRadius} 0`, // Rounded, flat bottom-left for assistant
color: theme.palette.primary.contrastText, // Warm Gray (#D3CDBF) for text
},
'system': {
...defaultStyle,
backgroundColor: '#EDEAE0', // Soft warm gray that plays nice with #D3CDBF
border: `1px dashed ${theme.palette.custom.highlight}`, // Golden Ochre
borderRadius: defaultRadius,
maxWidth: '90%',
minWidth: '90%',
alignSelf: 'center',
color: theme.palette.text.primary, // Charcoal Black
fontStyle: 'italic',
},
'info': {
...defaultStyle,
backgroundColor: '#BFD8D8', // Softened Dusty Teal
border: `1px solid ${theme.palette.secondary.main}`, // Dusty Teal
borderRadius: defaultRadius,
color: theme.palette.text.primary, // Charcoal Black (#2E2E2E) — much better contrast
opacity: 0.95,
},
'status': {
...defaultStyle,
backgroundColor: 'rgba(74, 122, 125, 0.15)', // Translucent dusty teal
border: `1px solid ${theme.palette.secondary.light}`, // Lighter dusty teal
borderRadius: '4px',
maxWidth: '75%',
minWidth: '75%',
alignSelf: 'center',
color: theme.palette.secondary.dark, // Darker dusty teal for text
fontWeight: 500, // Slightly bolder than normal
fontSize: '0.95rem', // Slightly smaller
padding: '8px 12px',
opacity: 0.9,
transition: 'opacity 0.3s ease-in-out', // Smooth fade effect for appearing/disappearing
},
'error': {
...defaultStyle,
backgroundColor: '#F8E7E7', // Soft light red background
border: `1px solid #D83A3A`, // Prominent red border
borderRadius: defaultRadius,
maxWidth: '90%',
minWidth: '90%',
alignSelf: 'center',
color: '#8B2525', // Deep red text for good contrast
padding: '10px 16px',
boxShadow: '0 1px 3px rgba(216, 58, 58, 0.15)', // Subtle shadow with red tint
},
'content': {
...defaultStyle,
backgroundColor: '#F5F2EA', // Light cream background for easy reading
@ -124,31 +71,93 @@ function ChatBubble(props: ChatBubbleProps) {
lineHeight: '1.3', // More compact line height
fontFamily: theme.typography.fontFamily, // Consistent font with your theme
},
'thinking': {
...defaultStyle
'error': {
...defaultStyle,
backgroundColor: '#F8E7E7', // Soft light red background
border: `1px solid #D83A3A`, // Prominent red border
borderRadius: defaultRadius,
maxWidth: '90%',
minWidth: '90%',
alignSelf: 'center',
color: '#8B2525', // Deep red text for good contrast
padding: '10px 16px',
boxShadow: '0 1px 3px rgba(216, 58, 58, 0.15)', // Subtle shadow with red tint
},
'streaming': {
...defaultStyle
'fact-check': 'qualifications',
'job-description': 'content',
'job-requirements': 'qualifications',
'info': {
...defaultStyle,
backgroundColor: '#BFD8D8', // Softened Dusty Teal
border: `1px solid ${theme.palette.secondary.main}`, // Dusty Teal
borderRadius: defaultRadius,
color: theme.palette.text.primary, // Charcoal Black (#2E2E2E) — much better contrast
opacity: 0.95,
},
'processing': {
...defaultStyle
'processing': "status",
'qualifications': {
...defaultStyle,
backgroundColor: theme.palette.primary.light, // Lighter shade, e.g., Soft Blue (#2A3B56)
border: `1px solid ${theme.palette.secondary.main}`, // Keep Dusty Teal (#4A7A7D) for contrast
borderRadius: `${defaultRadius} ${defaultRadius} ${defaultRadius} 0`, // Unchanged
color: theme.palette.primary.contrastText, // Warm Gray (#D3CDBF) for readable text
},
'resume': 'content',
'searching': 'status',
'status': {
...defaultStyle,
backgroundColor: 'rgba(74, 122, 125, 0.15)', // Translucent dusty teal
border: `1px solid ${theme.palette.secondary.light}`, // Lighter dusty teal
borderRadius: '4px',
maxWidth: '75%',
minWidth: '75%',
alignSelf: 'center',
color: theme.palette.secondary.dark, // Darker dusty teal for text
fontWeight: 500, // Slightly bolder than normal
fontSize: '0.95rem', // Slightly smaller
padding: '8px 12px',
opacity: 0.9,
transition: 'opacity 0.3s ease-in-out', // Smooth fade effect for appearing/disappearing
},
'streaming': "assistant",
'system': {
...defaultStyle,
backgroundColor: '#EDEAE0', // Soft warm gray that plays nice with #D3CDBF
border: `1px dashed ${theme.palette.custom.highlight}`, // Golden Ochre
borderRadius: defaultRadius,
maxWidth: '90%',
minWidth: '90%',
alignSelf: 'center',
color: theme.palette.text.primary, // Charcoal Black
fontStyle: 'italic',
},
'thinking': "status",
'user': {
...defaultStyle,
backgroundColor: theme.palette.background.default, // Warm Gray (#D3CDBF)
border: `1px solid ${theme.palette.custom.highlight}`, // Golden Ochre (#D4A017)
borderRadius: `${defaultRadius} ${defaultRadius} 0 ${defaultRadius}`, // Rounded, flat bottom-right for user
alignSelf: 'flex-end', // Right-aligned for user
color: theme.palette.primary.main, // Midnight Blue (#1A2536) for text
},
};
styles["thinking"] = styles["status"]
styles["streaming"] = styles["assistant"]
styles["processing"] = styles["status"]
for (const [key, value] of Object.entries(styles)) {
if (typeof (value) === "string") {
(styles as any)[key] = styles[value];
}
}
const icons: any = {
"searching": <Memory />,
"thinking": <Psychology />,
// "streaming": <Stream />,
"tooling": <LocationSearchingIcon />,
"processing": <LocationSearchingIcon />,
"error": <ErrorOutline color='error' />,
"info": <InfoOutline color='info' />,
"processing": <LocationSearchingIcon />,
// "streaming": <Stream />,
"searching": <Memory />,
"thinking": <Psychology />,
"tooling": <LocationSearchingIcon />,
};
if (role === 'content' && title) {
if (expandable || (role === 'content' && title)) {
return (
<Accordion
expanded={expanded}
@ -160,7 +169,7 @@ function ChatBubble(props: ChatBubbleProps) {
expandIcon={<ExpandMoreIcon />}
slotProps={{ content: { sx: { fontWeight: 'bold', fontSize: '1.1rem', m: 0, p: 0, display: 'flex', justifyItems: 'center' } } }}
>
{title}
{title || ""}
</AccordionSummary>
<AccordionDetails sx={{ mt: 0, mb: 0, p: 0, pl: 2, pr: 2 }}>
{children}
@ -170,7 +179,7 @@ function ChatBubble(props: ChatBubbleProps) {
}
return (
<Box className={className} sx={{ ...(styles[role] !== undefined ? styles[role] : styles["status"]), gap: 1, display: "flex", ...sx, flexDirection: "row" }}>
<Box className={className} sx={{ ...(role in styles ? styles[role] : styles["status"]), gap: 1, display: "flex", ...sx, flexDirection: "row" }}>
{icons[role] !== undefined && icons[role]}
<Box sx={{ p: 0, m: 0, gap: 0, display: "flex", flexGrow: 1, flexDirection: "column" }}>
{children}

View File

@ -6,6 +6,7 @@ interface DocumentProps {
title: string;
expanded?: boolean;
filepath: string;
content?: string;
setSnack: SetSnackType;
submitQuery?: MessageSubmitQuery;
connectionBase: string;
@ -14,13 +15,13 @@ interface DocumentProps {
}
const Document = (props: DocumentProps) => {
const { setSnack, submitQuery, connectionBase, filepath, title, expanded, disableCopy, onExpand } = props;
const { setSnack, submitQuery, connectionBase, filepath, content, title, expanded, disableCopy, onExpand } = props;
const [document, setDocument] = useState<string>("");
// Get the markdown
useEffect(() => {
if (document !== "") {
if (document !== "" || !filepath) {
return;
}
const fetchDocument = async () => {
@ -46,7 +47,6 @@ const Document = (props: DocumentProps) => {
}, [document, setDocument, filepath])
return (
<>
<Message
{...{
sx: {
@ -56,7 +56,7 @@ const Document = (props: DocumentProps) => {
m: 0,
flexGrow: 0,
},
message: { role: 'content', title: title, content: document },
message: { role: 'content', title: title, content: document || content || "" },
connectionBase,
submitQuery,
setSnack,
@ -64,8 +64,6 @@ const Document = (props: DocumentProps) => {
disableCopy,
onExpand,
}} />
{/* <Box sx={{ display: "flex", flexGrow: 1, p: 0, m: 0 }} /> */}
</>
);
};

View File

@ -35,7 +35,6 @@ const Mermaid: React.FC<MermaidProps> = (props: MermaidProps) => {
const renderMermaid = async () => {
if (containerRef.current && visible && chart) {
try {
console.log("Rendering Mermaid");
await mermaid.initialize(mermaidConfig || defaultMermaidConfig);
await mermaid.run({ nodes: [containerRef.current] });
} catch (e) {

View File

@ -29,7 +29,22 @@ import { SetSnackType } from './Snack';
import { CopyBubble } from './CopyBubble';
import { Scrollable } from './Scrollable';
type MessageRoles = 'info' | 'user' | 'assistant' | 'system' | 'status' | 'error' | 'content' | 'thinking' | 'processing' | 'streaming';
type MessageRoles =
'assistant' |
'content' |
'error' |
'fact-check' |
'info' |
'job-description' |
'job-requirements' |
'processing' |
'qualifications' |
'resume' |
'status' |
'streaming' |
'system' |
'thinking' |
'user';
type MessageData = {
role: MessageRoles,
@ -44,7 +59,9 @@ type MessageData = {
id?: string,
isProcessing?: boolean,
actions?: string[],
metadata?: MessageMetaData
metadata?: MessageMetaData,
expanded?: boolean,
expandable?: boolean,
};
interface MessageMetaData {
@ -73,7 +90,7 @@ type MessageList = MessageData[];
interface MessageProps {
sx?: SxProps<Theme>,
message: MessageData,
expanded?: boolean,
// expanded?: boolean, // Provided as part of MessageData
onExpand?: () => void,
submitQuery?: MessageSubmitQuery,
sessionId?: string,
@ -226,7 +243,6 @@ const MessageMeta = (props: MessageMetaProps) => {
const Message = (props: MessageProps) => {
const { message, submitQuery, sx, className, onExpand } = props;
const messageExpanded = props.expanded;
const [expanded, setExpanded] = useState<boolean>(false);
const textFieldRef = useRef(null);
@ -248,9 +264,7 @@ const Message = (props: MessageProps) => {
return (
<ChatBubble
className={className || "Message"}
role={message.role}
title={message.title}
expanded={messageExpanded}
{...message}
onExpand={onExpand}
sx={{
display: "flex",

View File

@ -11,6 +11,8 @@ import { MessageList, MessageData } from './Message';
import { SetSnackType } from './Snack';
import { Conversation } from './Conversation';
import './ResumeBuilder.css';
interface ResumeBuilderProps {
connectionBase: string,
sessionId: string | undefined,
@ -67,133 +69,88 @@ const ResumeBuilder: React.FC<ResumeBuilderProps> = ({
if (messages === undefined || messages.length === 0) {
return [];
}
console.log("filterJobDescriptionMessages disabled", messages)
if (messages.length > 1) {
if (messages.length > 2) {
setHasResume(true);
setHasFacts(true);
}
messages[0].role = 'content';
messages[0].title = 'Job Description';
messages[0].disableCopy = false;
if (messages.length > 0) {
messages[0].role = 'content';
messages[0].title = 'Job Description';
messages[0].disableCopy = false;
messages[0].expandable = true;
}
return messages;
if (messages.length > 3) {
// messages[2] is Show job requirements
messages[3].role = 'job-requirements';
messages[3].title = 'Job Requirements';
messages[3].disableCopy = false;
messages[3].expanded = true;
messages[3].expandable = true;
}
// let reduced = messages.filter((m, i) => {
// const keep = (m.metadata?.origin || m.origin || "no origin") === 'job_description';
// if ((m.metadata?.origin || m.origin || "no origin") === 'resume') {
// setHasResume(true);
// }
// // if (!keep) {
// // console.log(`filterJobDescriptionMessages: ${i + 1} filtered:`, m);
// // } else {
// // console.log(`filterJobDescriptionMessages: ${i + 1}:`, m);
// // }
/* Filter out the 2nd and 3rd (0-based) */
const filtered = messages.filter((m, i) => i !== 1 && i !== 2);
// return keep;
// });
// /* If Resume hasn't occurred yet and there is still more than one message,
// * resume has been generated. */
// if (!hasResume && reduced.length > 1) {
// setHasResume(true);
// }
// if (reduced.length > 0) {
// // First message is always 'content'
// reduced[0].title = 'Job Description';
// reduced[0].role = 'content';
// setHasJobDescription(true);
// }
// /* Filter out any messages which the server injected for state management */
// reduced = reduced.filter(m => m.display !== "hide");
// return reduced;
}, [setHasResume/*, setHasJobDescription, hasResume*/]);
return filtered;
}, [setHasResume, setHasFacts]);
const filterResumeMessages = useCallback((messages: MessageList): MessageList => {
if (messages === undefined || messages.length === 0) {
return [];
}
console.log("filterResumeMessages disabled")
if (messages.length > 3) {
setHasFacts(true);
if (messages.length > 1) {
// messages[0] is Show Qualifications
messages[1].role = 'qualifications';
messages[1].title = 'Candidate qualifications';
messages[1].disableCopy = false;
messages[1].expanded = false;
messages[1].expandable = true;
}
return messages;
// let reduced = messages.filter((m, i) => {
// const keep = (m.metadata?.origin || m.origin || "no origin") === 'resume';
// if ((m.metadata?.origin || m.origin || "no origin") === 'fact_check') {
// setHasFacts(true);
// }
// if (!keep) {
// console.log(`filterResumeMessages: ${i + 1} filtered:`, m);
// } else {
// console.log(`filterResumeMessages: ${i + 1}:`, m);
// }
// return keep;
// });
if (messages.length > 3) {
// messages[2] is Show Resume
messages[3].role = 'resume';
messages[3].title = 'Generated Resume';
messages[3].disableCopy = false;
messages[3].expanded = true;
messages[3].expandable = true;
}
// /* If there is more than one message, it is user: "...JOB_DESCRIPTION...", assistant: "...RESUME..."
// * which means a resume has been generated. */
// if (reduced.length > 1) {
// /* Remove the assistant message from the UI */
// if (reduced[0].role === "user") {
// reduced.splice(0, 1);
// }
// }
/* Filter out the 1st and 3rd messages (0-based) */
const filtered = messages.filter((m, i) => i !== 0 && i !== 2);
// /* If Fact Check hasn't occurred yet and there is still more than one message,
    // * facts have been generated. */
// if (!hasFacts && reduced.length > 1) {
// setHasFacts(true);
// }
// /* Filter out any messages which the server injected for state management */
// reduced = reduced.filter(m => m.display !== "hide");
// /* If there are any messages, there is a resume */
// if (reduced.length > 0) {
// // First message is always 'content'
// reduced[0].title = 'Resume';
// reduced[0].role = 'content';
// setHasResume(true);
// }
// return reduced;
}, [/*setHasResume, hasFacts,*/ setHasFacts]);
return filtered;
}, []);
const filterFactsMessages = useCallback((messages: MessageList): MessageList => {
if (messages === undefined || messages.length === 0) {
return [];
}
console.log("filterFactsMessages disabled")
return messages;
// messages.forEach((m, i) => console.log(`filterFactsMessages: ${i + 1}:`, m))
if (messages.length > 1) {
// messages[0] is Show verification
messages[1].role = 'fact-check';
messages[1].title = 'Fact Check';
messages[1].disableCopy = false;
messages[1].expanded = false;
messages[1].expandable = true;
}
// const reduced = messages.filter(m => {
// return (m.metadata?.origin || m.origin || "no origin") === 'fact_check';
// });
/* Filter out the 1st (0-based) */
const filtered = messages.filter((m, i) => i !== 0);
// /* If there is more than one message, it is user: "Fact check this resume...", assistant: "...FACT CHECK..."
// * which means facts have been generated. */
// if (reduced.length > 1) {
// /* Remove the user message from the UI */
// if (reduced[0].role === "user") {
// reduced.splice(0, 1);
// }
// // First message is always 'content'
// reduced[0].title = 'Fact Check';
// reduced[0].role = 'content';
// setHasFacts(true);
// }
// return reduced;
}, [/*setHasFacts*/]);
return filtered;
}, []);
const jobResponse = useCallback(async (message: MessageData) => {
console.log('onJobResponse', message);
if (message.actions && message.actions.includes("job_description")) {
await jobConversationRef.current.fetchHistory();
}
if (message.actions && message.actions.includes("resume_generated")) {
await resumeConversationRef.current.fetchHistory();
setHasResume(true);
@ -238,12 +195,29 @@ const ResumeBuilder: React.FC<ResumeBuilderProps> = ({
</Box>,
];
const jobDescriptionPreamble: MessageList = [{
role: 'info',
content: `Once you paste a job description and press **Generate Resume**, the system will perform the following actions:
1. **RAG**: Collects information from the RAG database relevant to the job description
2. **Isolated Analysis**: Three sub-stages
1. **Job Analysis**: Extracts requirements from job description only
2. **Candidate Analysis**: Catalogs qualifications from resume/context only
3. **Mapping Analysis**: Identifies legitimate matches between requirements and qualifications
3. **Resume Generation**: Uses mapping output to create a tailored resume with evidence-based content
4. **Verification**: Performs fact-checking to catch any remaining fabrications
1. **Re-generation**: If verification does not pass, a second attempt is made to correct any issues`
}];
if (!hasJobDescription) {
return <Conversation
ref={jobConversationRef}
{...{
type: "job_description",
actionLabel: "Generate Resume",
preamble: jobDescriptionPreamble,
hidePreamble: true,
prompt: "Paste a job description, then click Generate...",
multiline: true,
resetLabel: `job description${hasFacts ? ", resume, and fact check" : hasResume ? " and resume" : ""}`,
@ -357,7 +331,8 @@ const ResumeBuilder: React.FC<ResumeBuilderProps> = ({
}, [connectionBase, sessionId, setSnack, factsResponse, filterFactsMessages, resetFacts, hasResume, hasFacts]);
return (
<Box sx={{
<Box className="ResumeBuilder"
sx={{
p: 0,
m: 0,
display: "flex",

View File

@ -7,6 +7,8 @@ import Box from '@mui/material/Box';
import JsonView from '@uiw/react-json-view';
import { vscodeTheme } from '@uiw/react-json-view/vscode';
import { Mermaid } from './Mermaid';
import { Scrollable } from './Scrollable';
import { jsonrepair } from 'jsonrepair';
import './StyledMarkdown.css';
@ -30,29 +32,37 @@ const StyledMarkdown: React.FC<StyledMarkdownProps> = (props: StyledMarkdownProp
return <Mermaid className="Mermaid" chart={content} />;
}
if (className === "lang-json") {
return <JsonView
style={{
...vscodeTheme,
fontSize: "0.8rem",
maxHeight: "20rem",
padding: "14px 0",
overflow: "hidden",
width: "100%",
minHeight: "max-content",
backgroundColor: "transparent",
}}
displayDataTypes={false}
objectSortKeys={false}
collapsed={false}
value={JSON.parse(content)}>
<JsonView.String
render={({ children, ...reset }) => {
if (typeof (children) === "string" && children.match("\n")) {
return <pre {...reset} style={{ display: "flex", border: "none", ...reset.style }}>{children}</pre>
}
}}
/>
</JsonView>;
try {
const fixed = jsonrepair(content);
return <Scrollable className="JsonViewScrollable">
<JsonView
className="JsonView"
style={{
...vscodeTheme,
fontSize: "0.8rem",
maxHeight: "20rem",
padding: "14px 0",
overflow: "hidden",
width: "100%",
minHeight: "max-content",
backgroundColor: "transparent",
}}
displayDataTypes={false}
objectSortKeys={false}
collapsed={false}
value={JSON.parse(fixed)}>
<JsonView.String
render={({ children, ...reset }) => {
if (typeof (children) === "string" && children.match("\n")) {
return <pre {...reset} style={{ display: "flex", border: "none", ...reset.style }}>{children}</pre>
}
}}
/>
</JsonView>
</Scrollable>
} catch (e) {
console.log("jsonrepair error", e);
};
}
return <pre><code className={className}>{element.children}</code></pre>;
},

View File

@ -18,7 +18,6 @@ import { Scrollable } from './Scrollable';
import { StyledMarkdown } from './StyledMarkdown';
import './VectorVisualizer.css';
import { calculatePoint } from 'mermaid/dist/utils';
interface Metadata {
doc_type?: string;

View File

@ -191,16 +191,25 @@ class JobDescription(Agent):
if message.status == "error":
return
# Add the "Job requirements" message
if "generate_factual_tailored_resume" in message.metadata and "job_requirements" in message.metadata["generate_factual_tailored_resume"]:
new_message = Message(prompt="Show job requirements")
job_requirements = message.metadata["generate_factual_tailored_resume"]["job_requirements"]["results"]
new_message.response = f"```json\n\n{json.dumps(job_requirements, indent=2)}\n```\n"
new_message.status = "done"
self.conversation.add(new_message)
self.system_prompt = system_user_qualifications
resume_agent = self.context.get_agent(agent_type="resume")
fact_check_agent = self.context.get_agent(agent_type="fact_check")
if not resume_agent:
# Add the "Generated Resume" message
if "generate_factual_tailored_resume" in message.metadata and "analyze_candidate_qualifications" in message.metadata["generate_factual_tailored_resume"]:
resume_agent = self.context.get_or_create_agent(agent_type="resume", resume=message.response)
resume_message = Message(prompt="Show candidate qualifications")
qualifications = message.metadata["generate_factual_tailored_resume"]["analyze_candidate_qualifications"]["results"]
resume_message.response = f"# Candidate qualifications\n\n```json\n\n{json.dumps(qualifications, indent=2)}\n```\n"
resume_message.response = f"```json\n\n{json.dumps(qualifications, indent=2)}\n```\n"
resume_message.status = "done"
resume_agent.conversation.add(resume_message)
@ -212,6 +221,8 @@ class JobDescription(Agent):
message.response = "Resume generated."
message.actions.append("resume_generated")
# Add the "Fact Check" message
if "generate_factual_tailored_resume" in message.metadata and "verify_resume" in message.metadata["generate_factual_tailored_resume"]:
if "second_pass" in message.metadata["generate_factual_tailored_resume"]["verify_resume"]:
verification = message.metadata["generate_factual_tailored_resume"]["verify_resume"]["second_pass"]["results"]
@ -221,7 +232,7 @@ class JobDescription(Agent):
fact_check_agent = self.context.get_or_create_agent(agent_type="fact_check", facts=json.dumps(verification, indent=2))
fact_check_message = message.model_copy()
fact_check_message.prompt = "Show verification"
fact_check_message.response = f"# Resume verification\n\n```json\n\n{json.dumps(verification, indent=2)}\n```\n"
fact_check_message.response = f"```json\n\n{json.dumps(verification, indent=2)}\n```\n"
fact_check_message.status = "done"
fact_check_agent.conversation.add(fact_check_message)
@ -868,45 +879,78 @@ class JobDescription(Agent):
async def correct_resume_issues(self, message: Message, generated_resume: str, verification_results: Dict, skills_mapping: Dict, candidate_qualifications: Dict, original_header: str, metadata: Dict[str, Any]) -> AsyncGenerator[Message, None]:
"""Correct issues in the resume based on verification results."""
if verification_results["verification_results"]["overall_assessment"] == "APPROVED":
message.status = "done"
message.status = generated_resume
yield message
return
message.status = "done"
message.status = generated_resume
yield message
return
system_prompt = """
You are a professional resume editor with a focus on factual accuracy. Your task is to correct
the identified issues in a tailored resume according to the verification report.
## INSTRUCTIONS:
1. Make ONLY the changes specified in the verification report
2. Ensure all corrections maintain factual accuracy based on the skills mapping
3. Do not introduce any new claims or skills not present in the verification data
4. Maintain the original format and structure of the resume
5. DO NOT directly list the verification report or skills mapping
6. Provide ONLY the fully corrected resume
7. DO NOT provide Verification Results or other additional information beyond the corrected resume
## PROCESS:
1. For each issue in the verification report:
- Identify the problematic text in the resume
- Replace it with the suggested correction
- Ensure the correction is consistent with the rest of the resume
2. After making all corrections:
- Review the revised resume for consistency
- Ensure no factual inaccuracies have been introduced
- Check that all formatting remains professional
Return the fully corrected resume.
"""
prompt = f"Original Resume:\n{generated_resume}\n\n"
prompt += f"Verification Results:\n{json.dumps(verification_results, indent=2)}\n\n"
prompt += f"Skills Mapping:\n{json.dumps(skills_mapping, indent=2)}\n\n"
prompt += f"Candidate Qualifications:\n{json.dumps(candidate_qualifications, indent=2)}\n\n"
prompt += f"Original Resume Header:\n{original_header}"
system_prompt = """\
You are a professional resume editor with a focus on factual accuracy. Your task is to correct identified issues in a tailored resume according to the verification report.
## REFERENCE DATA:
The following sections contain reference information for you to use when making corrections. This information should NOT be included in your output resume:
1. Original Resume - The resume you will correct
2. Verification Results - Issues that need correction
3. Skills Mapping - How candidate skills align with job requirements
4. Candidate Qualifications - Verified information about the candidate's background
5. Original Resume Header - The formatting of the resume header
## INSTRUCTIONS:
1. Make ONLY the changes specified in the verification report
2. Ensure all corrections maintain factual accuracy based on the skills mapping
3. Do not introduce any new claims or skills not present in the verification data
4. Maintain the original format and structure of the resume
5. Provide ONLY the fully corrected resume as your output
6. DO NOT include any of the reference data sections in your output
7. DO NOT include any additional comments, explanations, or notes in your output
## PROCESS:
1. For each issue in the verification report:
- Identify the problematic text in the resume
- Replace it with the suggested correction
- Ensure the correction is consistent with the rest of the resume
2. After making all corrections:
- Review the revised resume for consistency
- Ensure no factual inaccuracies have been introduced
- Check that all formatting remains professional
Your output should contain ONLY the corrected resume text with no additional explanations or context.
"""
prompt = """
## REFERENCE DATA
### Original Resume:
"""
prompt += generated_resume
prompt += """
### Verification Results:
"""
prompt += json.dumps(verification_results, indent=2)
prompt += """
### Skills Mapping:
"""
prompt += json.dumps(skills_mapping, indent=2)
prompt += """
### Candidate Qualifications:
"""
prompt += json.dumps(candidate_qualifications, indent=2)
prompt += """
### Original Resume Header:
"""
prompt += generated_resume
prompt += """
## TASK
Based on the reference data above, please create a corrected version of the resume that addresses all issues identified in the verification report. Return ONLY the corrected resume.
"""
metadata["system_prompt"] = system_prompt
metadata["prompt"] = prompt