Restructured how BackstoryTextField works to not propagate changes until Enter

This commit is contained in:
James Ketr 2025-05-12 11:36:36 -07:00
parent bcb1e866cb
commit 6923132655
5 changed files with 140 additions and 128 deletions

View File

@ -1,4 +1,4 @@
import React, { useRef, useEffect, ChangeEvent, KeyboardEvent } from 'react';
import React, { useRef, useEffect, ChangeEvent, KeyboardEvent, useState } from 'react';
import { useTheme } from '@mui/material/styles';
import './BackstoryTextField.css';
@ -7,8 +7,7 @@ interface BackstoryTextFieldProps {
disabled?: boolean;
multiline?: boolean;
placeholder?: string;
onChange?: (e: ChangeEvent<HTMLTextAreaElement | HTMLInputElement>) => void;
onKeyDown?: (e: KeyboardEvent<HTMLTextAreaElement | HTMLInputElement>) => void;
onEnter: (value: string) => void;
}
const BackstoryTextField: React.FC<BackstoryTextFieldProps> = ({
@ -16,12 +15,12 @@ const BackstoryTextField: React.FC<BackstoryTextFieldProps> = ({
disabled = false,
multiline = false,
placeholder,
onChange,
onKeyDown,
onEnter
}) => {
const theme = useTheme();
const textareaRef = useRef<HTMLTextAreaElement>(null);
const shadowRef = useRef<HTMLTextAreaElement>(null);
const [editValue, setEditValue] = useState<string>(value);
useEffect(() => {
if (multiline && textareaRef.current && shadowRef.current) {
@ -43,15 +42,10 @@ const BackstoryTextField: React.FC<BackstoryTextFieldProps> = ({
}
}, [value, multiline, textareaRef, shadowRef, placeholder]);
const handleChange = (e: ChangeEvent<HTMLTextAreaElement | HTMLInputElement>) => {
if (onChange) {
onChange(e);
}
};
const handleKeyDown = (e: KeyboardEvent<HTMLTextAreaElement | HTMLInputElement>) => {
if (onKeyDown) {
onKeyDown(e);
const handleKeyDown = (event: KeyboardEvent<HTMLTextAreaElement | HTMLInputElement>) => {
if (event.key === 'Enter' && (!multiline || !event.shiftKey)) {
setEditValue('');
onEnter(event.currentTarget.value);
}
};
@ -74,10 +68,10 @@ const BackstoryTextField: React.FC<BackstoryTextFieldProps> = ({
<input
className="BackstoryTextField"
type="text"
value={value}
value={editValue}
disabled={disabled}
placeholder={placeholder}
onChange={handleChange}
onChange={(e) => { setEditValue(e.target.value); }}
onKeyDown={handleKeyDown}
style={sharedStyle}
/>
@ -89,10 +83,10 @@ const BackstoryTextField: React.FC<BackstoryTextFieldProps> = ({
<textarea
className="BackstoryTextField"
ref={textareaRef}
value={value}
value={editValue}
disabled={disabled}
placeholder={placeholder}
onChange={handleChange}
onChange={(e) => { setEditValue(e.target.value); }}
onKeyDown={handleKeyDown}
style={{
...sharedStyle,

View File

@ -246,10 +246,8 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>(({
}
};
const handleKeyPress = (event: any) => {
if (event.key === 'Enter' && !event.shiftKey) {
sendQuery(query);
}
const handleEnter = (value: string) => {
sendQuery(value);
};
useImperativeHandle(ref, () => ({
@ -553,8 +551,7 @@ const Conversation = forwardRef<ConversationHandle, ConversationProps>(({
disabled={processing}
multiline={multiline ? true : false}
value={query}
onChange={(e: any) => setQuery(e.target.value)}
onKeyDown={handleKeyPress}
onEnter={handleEnter}
placeholder={prompt}
/>
</div>

View File

@ -287,7 +287,7 @@ const Message = (props: MessageProps) => {
overflow: "auto", /* Handles scrolling for the div */
}}
>
<StyledMarkdown {...{ content: formattedContent, submitQuery, sessionId, setSnack }} />
<StyledMarkdown streaming={message.role === "streaming"} {...{ content: formattedContent, submitQuery, sessionId, setSnack }} />
</Scrollable>
:
<Typography

View File

@ -17,10 +17,11 @@ interface StyledMarkdownProps extends BackstoryElementProps {
className?: string,
content: string,
sx?: SxProps,
streaming?: boolean,
};
const StyledMarkdown: React.FC<StyledMarkdownProps> = (props: StyledMarkdownProps) => {
const { className, content, submitQuery, sx } = props;
const { className, content, submitQuery, sx, streaming } = props;
const theme = useTheme();
const overrides: any = {
@ -28,16 +29,16 @@ const StyledMarkdown: React.FC<StyledMarkdownProps> = (props: StyledMarkdownProp
component: (element: any) => {
const { className } = element.children.props;
const content = element.children?.props?.children || "";
if (className === "lang-mermaid") {
if (className === "lang-mermaid" && !streaming) {
return <Mermaid className="Mermaid" chart={content} />;
}
if (className === "lang-markdown") {
return <MuiMarkdown children={content} />;
}
if (className === "lang-json") {
if (className === "lang-json" && !streaming) {
try {
const fixed = jsonrepair(content);
return <Scrollable autoscroll className="JsonViewScrollable">
return <Scrollable className="JsonViewScrollable">
<JsonView
className="JsonView"
style={{

View File

@ -8,6 +8,7 @@ import json
import traceback
import asyncio
import time
from collections import defaultdict
from . base import Agent, agent_registry, LLMMessage
from .. conversation import Conversation
@ -419,32 +420,60 @@ class JobDescription(Agent):
metadata["error"] = message.response
raise
def format_rag_context(self, rag_results: List[Dict[str, Any]]) -> str:
    """
    Format RAG results from process_job_requirements into a structured string.

    Results are grouped by "category/subcategory", and each result's content
    is truncated to 100 characters to keep the prompt compact.

    Args:
        rag_results: List of result dicts from process_job_requirements; each
            is expected to carry "category", "subcategory", "context" (the
            originating query) and "content" keys.

    Returns:
        A formatted multi-line string for inclusion in the prompt, or a
        placeholder sentence when no results are available.
    """
    if not rag_results:
        return "No additional context available."

    # Group results by category and subcategory so related snippets render together.
    grouped_context = defaultdict(list)
    for result in rag_results:
        # strip("/") collapses an empty subcategory ("skills/" -> "skills").
        key = f"{result['category']}/{result['subcategory']}".strip("/")
        grouped_context[key].append({
            "query": result["context"],
            # Truncate long documents; the ternary binds over the concatenation,
            # so "..." is only appended to truncated content.
            "content": result["content"][:100] + "..." if len(result["content"]) > 100 else result["content"]
        })

    # Format as a structured string (output text preserved exactly as in source).
    context_lines = ["Additional Context from Document Retrieval:"]
    for category, items in grouped_context.items():
        context_lines.append(f"\nCategory: {category}")
        for item in items:
            context_lines.append(f"- Query: {item['query']}")
            context_lines.append(f" Relevant Document: {item['content']}")
    return "\n".join(context_lines)
# Stage 1B: Candidate Analysis Implementation
def create_candidate_analysis_prompt(self, resume: str, context: str) -> tuple[str, str]:
def create_candidate_analysis_prompt(self, resume: str, rag_results: List[Dict[str, Any]]) -> tuple[str, str]:
"""Create the prompt for candidate qualifications analysis."""
# system_prompt = """
# You are an objective resume analyzer. Your task is to catalog ALL skills, experiences, and qualifications
# present in a candidate's materials WITHOUT any reference to any job description.
# ## INSTRUCTIONS:
# 1. Analyze ONLY the candidate's resume and context provided.
# 2. Create a comprehensive inventory of the candidate's actual qualifications.
# 3. DO NOT consider any job requirements - this is a pure candidate analysis task.
# 4. For each qualification, cite exactly where in the materials it appears.
# 5. DO NOT duplicate or repeat time periods or skills once listed.
# ## OUTPUT FORMAT:
# You are an objective resume analyzer. Create a comprehensive inventory of all skills, experiences, and qualifications present in the candidate's materials.
# CORE PRINCIPLES:
# - Analyze ONLY the candidate's resume and provided context
# - Focus ONLY on the candidate's actual qualifications
# - Do not reference any job requirements
# - Include only explicitly mentioned information
# OUTPUT FORMAT:
# ```json
# {
# "candidate_qualifications": {
# "technical_skills": [
# {
# "skill": "skill name",
# "evidence": "exact quote from materials",
# "source": "resume or context",
# "expertise_level": "explicit level mentioned or 'unspecified'"
# "evidence_location": "where in resume this appears",
# "expertise_level": "stated level or 'unspecified'"
# }
# ],
# "work_experience": [
@ -462,88 +491,81 @@ class JobDescription(Agent):
# "degree": "degree name",
# "institution": "institution name",
# "completed": true/false,
# "evidence": "exact quote from materials"
# "graduation_date": "date or 'ongoing'"
# }
# ],
# "projects": [
# {
# "name": "project name",
# "description": "brief description",
# "technologies_used": ["tech1", "tech2"],
# "evidence": "exact quote from materials"
# "technologies_used": ["tech1", "tech2"]
# }
# ],
# "soft_skills": [
# {
# "skill": "skill name",
# "evidence": "exact quote or inference basis",
# "source": "resume or context"
# "context": "brief mention of where this appears"
# }
# ]
# }
# }
# ```
# Be thorough and precise. Include ONLY skills and experiences explicitly mentioned in the materials.
# For each entry, provide the exact text evidence from the materials that supports its inclusion.
# Do not make assumptions about skills based on job titles or project names - only include skills explicitly mentioned.
# """
system_prompt = """\
You are an objective resume analyzer. Create a comprehensive inventory of all skills, experiences, and qualifications present in the candidate's materials.
system_prompt = """
You are an objective resume analyzer. Create a comprehensive inventory of all skills, experiences, and qualifications present in the candidate's materials.
CORE PRINCIPLES:
- Analyze ONLY the candidate's resume and provided context.
- Focus ONLY on the candidate's actual qualifications explicitly mentioned in the resume.
- Use the additional context to clarify or provide background for terms, skills, or experiences mentioned in the resume (e.g., to understand the scope of a skill like 'Python' or a role's responsibilities).
- Do NOT treat the context as job requirements or infer qualifications not explicitly stated in the resume.
- Include only explicitly mentioned information from the resume, supplemented by context where relevant.
CORE PRINCIPLES:
- Analyze ONLY the candidate's resume and provided context
- Focus ONLY on the candidate's actual qualifications
- Do not reference any job requirements
- Include only explicitly mentioned information
OUTPUT FORMAT:
```json
OUTPUT FORMAT:
```json
{
"candidate_qualifications": {
"technical_skills": [
{
"candidate_qualifications": {
"technical_skills": [
{
"skill": "skill name",
"evidence_location": "where in resume this appears",
"expertise_level": "stated level or 'unspecified'"
}
],
"work_experience": [
{
"role": "job title",
"company": "company name",
"duration": "time period",
"responsibilities": ["resp1", "resp2"],
"technologies_used": ["tech1", "tech2"],
"achievements": ["achievement1", "achievement2"]
}
],
"education": [
{
"degree": "degree name",
"institution": "institution name",
"completed": true/false,
"graduation_date": "date or 'ongoing'"
}
],
"projects": [
{
"name": "project name",
"description": "brief description",
"technologies_used": ["tech1", "tech2"]
}
],
"soft_skills": [
{
"skill": "skill name",
"context": "brief mention of where this appears"
}
]
}
"skill": "skill name",
"evidence_location": "where in resume this appears",
"expertise_level": "stated level or 'unspecified'"
}
"""
],
"work_experience": [
{
"role": "job title",
"company": "company name",
"duration": "time period",
"responsibilities": ["resp1", "resp2"],
"technologies_used": ["tech1", "tech2"],
"achievements": ["achievement1", "achievement2"]
}
],
"education": [
{
"degree": "degree name",
"institution": "institution name",
"completed": true/false,
"graduation_date": "date or 'ongoing'"
}
],
"projects": [
{
"name": "project name",
"description": "brief description",
"technologies_used": ["tech1", "tech2"]
}
],
"soft_skills": [
{
"skill": "skill name",
"context": "brief mention of where this appears"
}
]
}
}
"""
context = self.format_rag_context(rag_results)
prompt = f"Resume:\n{resume}\n\nAdditional Context:\n{context}"
return system_prompt, prompt
@ -602,10 +624,10 @@ class JobDescription(Agent):
message.status = "done"
yield message
async def analyze_candidate_qualifications(self, message: Message, resume: str, additional_context: str, metadata: Dict[str, Any]) -> AsyncGenerator[Message, None]:
async def analyze_candidate_qualifications(self, message: Message, resume: str, rag_context: List[Dict[str, Any]], metadata: Dict[str, Any]) -> AsyncGenerator[Message, None]:
"""Analyze candidate qualifications from resume and context."""
try:
system_prompt, prompt = self.create_candidate_analysis_prompt(resume, additional_context)
system_prompt, prompt = self.create_candidate_analysis_prompt(resume, rag_context)
metadata["system_prompt"] = system_prompt
metadata["prompt"] = prompt
async for message in self.call_llm(message, system_prompt, prompt):
@ -1048,7 +1070,7 @@ Based on the reference data above, please create a corrected version of the resu
# Call find_similar for the item
try:
rag_results = retriever.find_similar(item, top_k=10, threshold=0.7)
rag_results = retriever.find_similar(item, top_k=20, threshold=0.4) # Strict matching
# Process each result
for doc_id, content, distance, metadata in zip(
rag_results["ids"],
@ -1089,21 +1111,22 @@ Based on the reference data above, please create a corrected version of the resu
reverse=True
)
# Return top 20 results
return sorted_results[:20]
# Return top 10 results
return sorted_results[:10]
async def generate_rag_content(self, message: Message, job_requirements: Dict[str, Any]) -> AsyncGenerator[Message, None]:
results = self.process_job_requirements(job_requirements = job_requirements)
message.response = f"Retrieved {len(results)} documents:\n"
for result in results:
message.response += f"""
ID: {result['id']}, Context: {result['context']}, \
Category: {result['category']}/{result['subcategory']}, \
Similarity Score: {result['score']:.3f}, \
Combined Score: {result['weight'] * result['score']:.3f}, \
Content: {result['content']}
"""
message.response = f"Retrieved {len(results)} documents."
message.metadata["rag_context"] = results
# for result in results:
# message.response += f"""
# ID: {result['id']}, Context: {result['context']}, \
# Category: {result['category']}/{result['subcategory']}, \
# Similarity Score: {result['score']:.3f}, \
# Combined Score: {result['weight'] * result['score']:.3f}, \
# Content: {result['content']}
# """
message.status = "done"
yield message
return
@ -1153,17 +1176,14 @@ Content: {result['content']}
if message.status == "error":
return
yield message
return
additional_context = message.preamble["context"]
rag_context = message.metadata["rag_context"]
metadata["analyze_candidate_qualifications"] = {
"additional_context": additional_context
"rag_context": rag_context
}
async for message in self.analyze_candidate_qualifications(
message=message,
resume=resume,
additional_context=additional_context,
rag_context=rag_context,
metadata=metadata["analyze_candidate_qualifications"]):
if message.status != "done":
yield message