Compare commits
3 Commits
05c53653ed ... d9a0267cfa

Commits:
- d9a0267cfa
- 7586725f11
- cb97cabfc3
@@ -20,58 +20,101 @@ import CheckCircleIcon from '@mui/icons-material/CheckCircle';
 import ErrorIcon from '@mui/icons-material/Error';
 import PendingIcon from '@mui/icons-material/Pending';
 import WarningIcon from '@mui/icons-material/Warning';
+import { Candidate, ChatMessage, ChatMessageBase, ChatMessageUser, ChatSession, JobRequirements, SkillMatch } from 'types/types';
+import { useAuth } from 'hooks/AuthContext';
+import { BackstoryPageProps } from './BackstoryTab';
+import { toCamelCase } from 'types/conversion';

-// Define TypeScript interfaces for our data structures
-interface Citation {
-  text: string;
-  source: string;
-  relevance: number; // 0-100 scale
+interface Job {
+  title: string;
+  description: string;
 }

-interface SkillMatch {
-  requirement: string;
-  status: 'pending' | 'complete' | 'error';
-  matchScore: number; // 0-100 scale
-  assessment: string;
-  citations: Citation[];
+interface JobAnalysisProps extends BackstoryPageProps {
+  job: Job;
+  candidate: Candidate;
 }

-interface JobAnalysisProps {
-  jobTitle: string;
-  candidateName: string;
-  // This function would connect to your backend and return updates
-  fetchRequirements: () => Promise<string[]>;
-  // This function would fetch match data for a specific requirement
-  fetchMatchForRequirement: (requirement: string) => Promise<SkillMatch>;
-}
+const defaultMessage: ChatMessageUser = {
+  type: "preparing", status: "done", sender: "user", sessionId: "", timestamp: new Date(), content: ""
+};

-const JobMatchAnalysis: React.FC<JobAnalysisProps> = ({
-  jobTitle,
-  candidateName,
-  fetchRequirements,
-  fetchMatchForRequirement
-}) => {
+const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) => {
+  const {
+    job,
+    candidate,
+    setSnack,
+  } = props
+  const { apiClient } = useAuth();
   const theme = useTheme();
+  const [jobRequirements, setJobRequirements] = useState<JobRequirements | null>(null);
   const [requirements, setRequirements] = useState<string[]>([]);
   const [skillMatches, setSkillMatches] = useState<SkillMatch[]>([]);
-  const [loadingRequirements, setLoadingRequirements] = useState<boolean>(true);
+  const [creatingSession, setCreatingSession] = useState<boolean>(false);
+  const [loadingRequirements, setLoadingRequirements] = useState<boolean>(false);
   const [expanded, setExpanded] = useState<string | false>(false);
   const [overallScore, setOverallScore] = useState<number>(0);
+  const [requirementsSession, setRequirementsSession] = useState<ChatSession | null>(null);
+  const [statusMessage, setStatusMessage] = useState<ChatMessage | null>(null);

   // Handle accordion expansion
   const handleAccordionChange = (panel: string) => (event: React.SyntheticEvent, isExpanded: boolean) => {
     setExpanded(isExpanded ? panel : false);
   };

+  useEffect(() => {
+    if (requirementsSession || creatingSession) {
+      return;
+    }
+
+    const createSession = async () => {
+      try {
+        const session: ChatSession = await apiClient.createCandidateChatSession(
+          candidate.username,
+          'job_requirements',
+          `Generate requirements for ${job.title}`
+        );
+        setSnack("Job analysis session started");
+        setRequirementsSession(session);
+      } catch (error) {
+        console.log(error);
+        setSnack("Unable to create requirements session", "error");
+      }
+      setCreatingSession(false);
+    };
+    setCreatingSession(true);
+    createSession();
+  }, [requirementsSession, apiClient, candidate]);

   // Fetch initial requirements
   useEffect(() => {
+    if (!job.description || !requirementsSession || loadingRequirements || jobRequirements) {
+      return;
+    }
+
     const getRequirements = async () => {
+      setLoadingRequirements(true);
       try {
-        const fetchedRequirements = await fetchRequirements();
-        setRequirements(fetchedRequirements);
+        const chatMessage: ChatMessageUser = { ...defaultMessage, sessionId: requirementsSession.id || '', content: job.description };
+        apiClient.sendMessageStream(chatMessage, {
+          onMessage: (msg: ChatMessage) => {
+            console.log(`onMessage: ${msg.type}`, msg);
+            if (msg.type === "response") {
+              const incoming: any = toCamelCase<JobRequirements>(JSON.parse(msg.content || ''));
+              const requirements: string[] = ['technicalSkills', 'experienceRequirements'].flatMap((type) => {
+                return ['required', 'preferred'].flatMap((level) => {
+                  return incoming[type][level].map((s: string) => s);
+                })
+              });
+              ['softSkills', 'experience', 'education', 'certifications', 'preferredAttributes'].forEach(l => {
+                if (incoming[l]) {
+                  incoming[l].forEach((s: string) => requirements.push(s));
+                }
+              });
+
               // Initialize skill matches with pending status
-        const initialSkillMatches = fetchedRequirements.map(req => ({
+              const initialSkillMatches = requirements.map(req => ({
                 requirement: req,
                 status: 'pending' as const,
                 matchScore: 0,
@@ -79,16 +122,42 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = ({
                 citations: []
               }));

+              setRequirements(requirements);
               setSkillMatches(initialSkillMatches);
+              setStatusMessage(null);
               setLoadingRequirements(false);
+            }
+          },
+          onError: (error: string | ChatMessageBase) => {
+            console.log("onError:", error);
+            // Type-guard to determine if this is a ChatMessageBase or a string
+            if (typeof error === "object" && error !== null && "content" in error) {
+              setSnack(error.content || 'Error obtaining requirements from job description.', "error");
+            } else {
+              setSnack(error as string, "error");
+            }
+            setLoadingRequirements(false);
+          },
+          onStreaming: (chunk: ChatMessageBase) => {
+            // console.log("onStreaming:", chunk);
+          },
+          onStatusChange: (status: string) => {
+            console.log(`onStatusChange: ${status}`);
+          },
+          onComplete: () => {
+            console.log("onComplete");
+            setStatusMessage(null);
+            setLoadingRequirements(false);
+          }
+        });
       } catch (error) {
-        console.error("Error fetching requirements:", error);
+        console.error('Failed to send message:', error);
         setLoadingRequirements(false);
       }
     };

     getRequirements();
-  }, [fetchRequirements]);
+  }, [job, requirementsSession]);

   // Fetch match data for each requirement
   useEffect(() => {
@@ -98,8 +167,8 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = ({
     // Process requirements one by one
     for (let i = 0; i < requirements.length; i++) {
       try {
-        const match = await fetchMatchForRequirement(requirements[i]);
+        const match: SkillMatch = await apiClient.candidateMatchForRequirement(candidate.id || '', requirements[i]);
+        console.log(match);
         setSkillMatches(prev => {
           const updated = [...prev];
           updated[i] = match;
@@ -133,7 +202,7 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = ({
     if (!loadingRequirements) {
       fetchMatchData();
     }
-  }, [requirements, loadingRequirements, fetchMatchForRequirement]);
+  }, [requirements, loadingRequirements]);

   // Get color based on match score
   const getMatchColor = (score: number): string => {
@@ -165,13 +234,13 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = ({

         <Grid size={{ xs: 12, md: 6 }}>
           <Typography variant="h6" component="h2">
-            Job: {jobTitle}
+            Job: {job.title}
           </Typography>
         </Grid>

         <Grid size={{ xs: 12, md: 6 }}>
           <Typography variant="h6" component="h2">
-            Candidate: {candidateName}
+            Candidate: {candidate.fullName}
           </Typography>
         </Grid>

@@ -329,7 +398,7 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = ({
                     Supporting Evidence:
                   </Typography>

-                  {match.citations.length > 0 ? (
+                  {match.citations && match.citations.length > 0 ? (
                     match.citations.map((citation, citIndex) => (
                       <Card
                         key={citIndex}
@ -1,153 +0,0 @@
|
|||||||
import React from 'react';
|
|
||||||
import { JobMatchAnalysis } from '../components/JobMatchAnalysis';
|
|
||||||
|
|
||||||
// Mock data and functions to simulate your backend
|
|
||||||
const mockRequirements = [
|
|
||||||
"5+ years of React development experience",
|
|
||||||
"Strong TypeScript skills",
|
|
||||||
"Experience with RESTful APIs",
|
|
||||||
"Knowledge of state management solutions (Redux, Context API)",
|
|
||||||
"Experience with CI/CD pipelines",
|
|
||||||
"Cloud platform experience (AWS, Azure, GCP)"
|
|
||||||
];
|
|
||||||
|
|
||||||
// Simulates fetching requirements with a delay
|
|
||||||
const mockFetchRequirements = async (): Promise<string[]> => {
|
|
||||||
return new Promise((resolve) => {
|
|
||||||
setTimeout(() => {
|
|
||||||
resolve(mockRequirements);
|
|
||||||
}, 1500); // Simulate network delay
|
|
||||||
});
|
|
||||||
};
|
|
||||||
|
|
||||||
// Simulates fetching match data for a requirement with varying delays
|
|
||||||
const mockFetchMatchForRequirement = async (requirement: string): Promise<any> => {
|
|
||||||
// Create different mock responses based on the requirement
|
|
||||||
const mockResponses: Record<string, any> = {
|
|
||||||
"5+ years of React development experience": {
|
|
||||||
requirement: "5+ years of React development experience",
|
|
||||||
status: "complete",
|
|
||||||
matchScore: 85,
|
|
||||||
assessment: "The candidate demonstrates extensive React experience spanning over 6 years, with a strong portfolio of complex applications and deep understanding of React's component lifecycle and hooks.",
|
|
||||||
citations: [
|
|
||||||
{
|
|
||||||
text: "Led frontend development team of 5 engineers to rebuild our customer portal using React and TypeScript, resulting in 40% improved performance and 30% reduction in bugs.",
|
|
||||||
source: "Resume, Work Experience",
|
|
||||||
relevance: 95
|
|
||||||
},
|
|
||||||
{
|
|
||||||
text: "Developed and maintained reusable React component library used across 12 different products within the organization.",
|
|
||||||
source: "Resume, Work Experience",
|
|
||||||
relevance: 90
|
|
||||||
},
|
|
||||||
{
|
|
||||||
text: "I've been working with React since 2017, building everything from small widgets to enterprise applications.",
|
|
||||||
source: "Cover Letter",
|
|
||||||
relevance: 85
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Strong TypeScript skills": {
|
|
||||||
requirement: "Strong TypeScript skills",
|
|
||||||
status: "complete",
|
|
||||||
matchScore: 90,
|
|
||||||
assessment: "The candidate shows excellent TypeScript proficiency through their work history and personal projects. They have implemented complex type systems and demonstrate an understanding of advanced TypeScript features.",
|
|
||||||
citations: [
|
|
||||||
{
|
|
||||||
text: "Converted a legacy JavaScript codebase of 100,000+ lines to TypeScript, implementing strict type checking and reducing runtime errors by 70%.",
|
|
||||||
source: "Resume, Projects",
|
|
||||||
relevance: 98
|
|
||||||
},
|
|
||||||
{
|
|
||||||
text: "Created comprehensive TypeScript interfaces for our GraphQL API, ensuring type safety across the entire application stack.",
|
|
||||||
source: "Resume, Technical Skills",
|
|
||||||
relevance: 95
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Experience with RESTful APIs": {
|
|
||||||
requirement: "Experience with RESTful APIs",
|
|
||||||
status: "complete",
|
|
||||||
matchScore: 75,
|
|
||||||
assessment: "The candidate has good experience with RESTful APIs, having both consumed and designed them. They understand REST principles but have less documented experience with API versioning and caching strategies.",
|
|
||||||
citations: [
|
|
||||||
{
|
|
||||||
text: "Designed and implemented a RESTful API serving over 1M requests daily with a focus on performance and scalability.",
|
|
||||||
source: "Resume, Technical Projects",
|
|
||||||
relevance: 85
|
|
||||||
},
|
|
||||||
{
|
|
||||||
text: "Worked extensively with third-party APIs including Stripe, Twilio, and Salesforce to integrate payment processing and communication features.",
|
|
||||||
source: "Resume, Work Experience",
|
|
||||||
relevance: 70
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Knowledge of state management solutions (Redux, Context API)": {
|
|
||||||
requirement: "Knowledge of state management solutions (Redux, Context API)",
|
|
||||||
status: "complete",
|
|
||||||
matchScore: 65,
|
|
||||||
assessment: "The candidate has moderate experience with state management, primarily using Redux. There is less evidence of Context API usage, which could indicate a knowledge gap in more modern React state management approaches.",
|
|
||||||
citations: [
|
|
||||||
{
|
|
||||||
text: "Implemented Redux for global state management in an e-commerce application, handling complex state logic for cart, user preferences, and product filtering.",
|
|
||||||
source: "Resume, Skills",
|
|
||||||
relevance: 80
|
|
||||||
},
|
|
||||||
{
|
|
||||||
text: "My experience includes working with state management libraries like Redux and MobX.",
|
|
||||||
source: "Cover Letter",
|
|
||||||
relevance: 60
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Experience with CI/CD pipelines": {
|
|
||||||
requirement: "Experience with CI/CD pipelines",
|
|
||||||
status: "complete",
|
|
||||||
matchScore: 40,
|
|
||||||
assessment: "The candidate shows limited experience with CI/CD pipelines. While they mention some exposure to Jenkins and GitLab CI, there is insufficient evidence of setting up or maintaining comprehensive CI/CD workflows.",
|
|
||||||
citations: [
|
|
||||||
{
|
|
||||||
text: "Familiar with CI/CD tools including Jenkins and GitLab CI.",
|
|
||||||
source: "Resume, Skills",
|
|
||||||
relevance: 40
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Cloud platform experience (AWS, Azure, GCP)": {
|
|
||||||
requirement: "Cloud platform experience (AWS, Azure, GCP)",
|
|
||||||
status: "complete",
|
|
||||||
matchScore: 30,
|
|
||||||
assessment: "The candidate demonstrates minimal experience with cloud platforms. There is a brief mention of AWS S3 and Lambda, but no substantial evidence of deeper cloud architecture knowledge or experience with Azure or GCP.",
|
|
||||||
citations: [
|
|
||||||
{
|
|
||||||
text: "Used AWS S3 for file storage and Lambda for image processing in a photo sharing application.",
|
|
||||||
source: "Resume, Projects",
|
|
||||||
relevance: 35
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Return a promise that resolves with the mock data after a delay
|
|
||||||
return new Promise((resolve) => {
|
|
||||||
// Different requirements resolve at different speeds to simulate real-world analysis
|
|
||||||
const delay = Math.random() * 5000 + 2000; // 2-7 seconds
|
|
||||||
setTimeout(() => {
|
|
||||||
resolve(mockResponses[requirement]);
|
|
||||||
}, delay);
|
|
||||||
});
|
|
||||||
};
|
|
||||||
|
|
||||||
const DemoComponent: React.FC = () => {
|
|
||||||
return (
|
|
||||||
<JobMatchAnalysis
|
|
||||||
jobTitle="Senior Frontend Developer"
|
|
||||||
candidateName="Alex Johnson"
|
|
||||||
fetchRequirements={mockFetchRequirements}
|
|
||||||
fetchMatchForRequirement={mockFetchMatchForRequirement}
|
|
||||||
/>
|
|
||||||
);
|
|
||||||
};
|
|
||||||
|
|
||||||
export { DemoComponent };
|
|
@@ -187,11 +187,11 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
     controllerRef.current = apiClient.sendMessageStream(chatMessage, {
       onMessage: async (msg: ChatMessage) => {
         console.log(`onMessage: ${msg.type} ${msg.content}`, msg);
-        if (msg.type === "heartbeat") {
+        if (msg.type === "heartbeat" && msg.content) {
          const heartbeat = JSON.parse(msg.content);
          setTimestamp(heartbeat.timestamp);
        }
-        if (msg.type === "thinking") {
+        if (msg.type === "thinking" && msg.content) {
          const status = JSON.parse(msg.content);
          setProcessingMessage({ ...defaultMessage, content: status.message });
        }
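The added `&& msg.content` guards matter because `JSON.parse('')` throws a `SyntaxError`, so a content-less heartbeat or thinking frame would previously have aborted the callback mid-stream. A hedged sketch of the same guard factored into a helper (`safeParse` is illustrative, not part of this commit):

```typescript
// Parse a message payload only when one is present; swallow malformed JSON
// instead of letting it propagate out of the stream callback.
const safeParse = <T = any>(content: string): T | null => {
  if (!content) return null;
  try {
    return JSON.parse(content) as T;
  } catch {
    return null;
  }
};
```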
@@ -39,13 +39,16 @@ import { useNavigate } from 'react-router-dom';
 import { BackstoryPageProps } from 'components/BackstoryTab';
 import { useAuth } from 'hooks/AuthContext';
 import { useSelectedCandidate } from 'hooks/GlobalContext';
+import { CandidateInfo } from 'components/CandidateInfo';
+import { ComingSoon } from 'components/ui/ComingSoon';

 // Main component
 const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps) => {
   const theme = useTheme();
   const { user } = useAuth();
   const { selectedCandidate, setSelectedCandidate } = useSelectedCandidate()
+  const { setSnack, submitQuery } = props;
+  const backstoryProps = { setSnack, submitQuery };
   // State management
   const [activeStep, setActiveStep] = useState(0);
   const [jobDescription, setJobDescription] = useState('');
@@ -55,7 +58,6 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
   const [error, setError] = useState<string | null>(null);
   const [openUploadDialog, setOpenUploadDialog] = useState(false);
   const { apiClient } = useAuth();
-  const { setSnack } = props;
   const [candidates, setCandidates] = useState<Candidate[] | null>(null);

   useEffect(() => {
@@ -93,32 +95,14 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
   }, [selectedCandidate, activeStep]);

   // Steps in our process
-  const steps = selectedCandidate === null ? [
-    { index: 0, label: 'Select Candidate', icon: <PersonIcon /> },
+  const steps = [
     { index: 1, label: 'Job Description', icon: <WorkIcon /> },
-    { index: 2, label: 'View Analysis', icon: <AssessmentIcon /> }
-  ] : [
-    { index: 1, label: 'Job Description', icon: <WorkIcon /> },
-    { index: 2, label: 'View Analysis', icon: <AssessmentIcon /> }
+    { index: 2, label: 'AI Analysis', icon: <WorkIcon /> },
+    { index: 3, label: 'Generated Resume', icon: <AssessmentIcon /> }
   ];
+  if (!selectedCandidate) {
+    steps.unshift({ index: 0, label: 'Select Candidate', icon: <PersonIcon /> })
+  }

-  // Mock handlers for our analysis APIs
-  const fetchRequirements = async (): Promise<string[]> => {
-    // Simulates extracting requirements from the job description
-    await new Promise(resolve => setTimeout(resolve, 2000));
-
-    // This would normally parse the job description to extract requirements
-    const mockRequirements = [
-      "5+ years of React development experience",
-      "Strong TypeScript skills",
-      "Experience with RESTful APIs",
-      "Knowledge of state management solutions (Redux, Context API)",
-      "Experience with CI/CD pipelines",
-      "Cloud platform experience (AWS, Azure, GCP)"
-    ];
-
-    return mockRequirements;
-  };

   const fetchMatchForRequirement = async (requirement: string): Promise<any> => {
     // Create different mock responses based on the requirement
@@ -245,10 +229,12 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
       return;
     }

-    if (activeStep === 1 && (/*(extraInfo && !jobTitle) || */!jobDescription)) {
-      setError('Please provide both job title and description before continuing.');
-      return;
+    if (activeStep === 1) {
+      if ((/*(extraInfo && !jobTitle) || */!jobDescription)) {
+        setError('Please provide job description before continuing.');
+        return;
+      }
     }

     if (activeStep === 2) {
       setAnalysisStarted(true);
@@ -433,15 +419,20 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
     <Box sx={{ mt: 3 }}>
       {selectedCandidate && (
         <JobMatchAnalysis
-          jobTitle={jobTitle}
-          candidateName={selectedCandidate.fullName}
-          fetchRequirements={fetchRequirements}
-          fetchMatchForRequirement={fetchMatchForRequirement}
+          job={{ title: jobTitle, description: jobDescription }}
+          candidate={selectedCandidate}
+          {...backstoryProps}
         />
       )}
     </Box>
   );

+  const renderResume = () => (
+    <Box sx={{ mt: 3 }}>
+      {selectedCandidate && <ComingSoon>Resume Builder</ComingSoon>}
+    </Box>
+  );
+
   // If no user is logged in, show message
   if (!user) {
     return (
@@ -464,6 +455,7 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
       <Typography variant="h4" component="h1" gutterBottom>
         Candidate Analysis
       </Typography>
+      {selectedCandidate && <CandidateInfo variant="small" candidate={selectedCandidate} />}
       <Typography variant="subtitle1" color="text.secondary" gutterBottom>
         Match candidates to job requirements with AI-powered analysis
       </Typography>
@@ -496,6 +488,7 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
         {activeStep === 0 && renderCandidateSelection()}
         {activeStep === 1 && renderJobDescription()}
         {activeStep === 2 && renderAnalysis()}
+        {activeStep === 3 && renderResume()}

         <Box sx={{ display: 'flex', flexDirection: 'row', pt: 2 }}>
           <Button
|
@ -824,6 +824,18 @@ class ApiClient {
|
|||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async candidateMatchForRequirement(candidate_id: string, requirement: string) : Promise<Types.SkillMatch> {
|
||||||
|
const response = await fetch(`${this.baseUrl}/candidates/${candidate_id}/skill-match`, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: this.defaultHeaders,
|
||||||
|
body: JSON.stringify(requirement)
|
||||||
|
});
|
||||||
|
|
||||||
|
const result = await handleApiResponse<Types.SkillMatch>(response);
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
async updateCandidateDocument(document: Types.Document) : Promise<Types.Document> {
|
async updateCandidateDocument(document: Types.Document) : Promise<Types.Document> {
|
||||||
const request : Types.DocumentUpdateRequest = {
|
const request : Types.DocumentUpdateRequest = {
|
||||||
filename: document.filename,
|
filename: document.filename,
|
||||||
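A possible call site for the new endpoint, mirroring how JobMatchAnalysis.tsx uses it earlier in this compare; the candidate id and requirement string below are placeholders:

```typescript
// Hypothetical usage; assumes an ApiClient instance (e.g. from useAuth()).
async function matchOneRequirement(apiClient: ApiClient): Promise<void> {
  const match: Types.SkillMatch = await apiClient.candidateMatchForRequirement(
    'candidate-123',                                 // placeholder candidate id
    '5+ years of React development experience'       // placeholder requirement
  );
  console.log(match.status, match.matchScore, match.assessment);
}
```

Note that the request body is a bare JSON-encoded string rather than an object wrapper, so the backend route has to accept exactly that shape.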
@@ -1040,7 +1052,11 @@ class ApiClient {

         default:
           incomingMessageList.push(convertedIncoming);
-          options.onMessage?.(convertedIncoming);
+          try {
+            options.onMessage?.(convertedIncoming);
+          } catch (error) {
+            console.error('onMessage handler failed: ', error);
+          }
           break;
       }
     }
@@ -1,6 +1,6 @@
 // Generated TypeScript types from Pydantic models
 // Source: src/backend/models.py
-// Generated on: 2025-06-03T18:51:32.304683
+// Generated on: 2025-06-04T03:59:11.250216
 // DO NOT EDIT MANUALLY - This file is auto-generated

 // ============================
@@ -13,13 +13,13 @@ export type ActivityType = "login" | "search" | "view_job" | "apply_job" | "mess

 export type ApplicationStatus = "applied" | "reviewing" | "interview" | "offer" | "rejected" | "accepted" | "withdrawn";

-export type ChatContextType = "job_search" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "generate_image" | "rag_search";
+export type ChatContextType = "job_search" | "job_requirements" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "generate_image" | "rag_search" | "skill_match";

 export type ChatMessageType = "error" | "generating" | "info" | "preparing" | "processing" | "heartbeat" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";

-export type ChatSenderType = "user" | "assistant" | "system";
+export type ChatSenderType = "user" | "assistant" | "agent" | "system";

-export type ChatStatusType = "initializing" | "streaming" | "done" | "error";
+export type ChatStatusType = "initializing" | "streaming" | "status" | "done" | "error";

 export type ColorBlindMode = "protanopia" | "deuteranopia" | "tritanopia" | "none";

@@ -49,6 +49,8 @@ export type SearchType = "similarity" | "mmr" | "hybrid" | "keyword";

 export type SkillLevel = "beginner" | "intermediate" | "advanced" | "expert";

+export type SkillStatus = "pending" | "complete" | "error";
+
 export type SocialPlatform = "linkedin" | "twitter" | "github" | "dribbble" | "behance" | "website" | "other";

 export type SortOrder = "asc" | "desc";
@@ -272,7 +274,7 @@ export interface Certification {
 }

 export interface ChatContext {
-  type: "job_search" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "generate_image" | "rag_search";
+  type: "job_search" | "job_requirements" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "generate_image" | "rag_search" | "skill_match";
   relatedEntityId?: string;
   relatedEntityType?: "job" | "candidate" | "employer";
   additionalContext?: Record<string, any>;
@@ -282,10 +284,10 @@ export interface ChatMessage {
   id?: string;
   sessionId: string;
   senderId?: string;
-  status: "initializing" | "streaming" | "done" | "error";
+  status: "initializing" | "streaming" | "status" | "done" | "error";
   type: "error" | "generating" | "info" | "preparing" | "processing" | "heartbeat" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";
-  sender: "user" | "assistant" | "system";
-  timestamp: Date;
+  sender: "user" | "assistant" | "agent" | "system";
+  timestamp?: Date;
   tunables?: Tunables;
   content: string;
   metadata?: ChatMessageMetaData;
@@ -295,16 +297,16 @@ export interface ChatMessageBase {
   id?: string;
   sessionId: string;
   senderId?: string;
-  status: "initializing" | "streaming" | "done" | "error";
+  status: "initializing" | "streaming" | "status" | "done" | "error";
   type: "error" | "generating" | "info" | "preparing" | "processing" | "heartbeat" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";
-  sender: "user" | "assistant" | "system";
-  timestamp: Date;
+  sender: "user" | "assistant" | "agent" | "system";
+  timestamp?: Date;
   tunables?: Tunables;
   content: string;
 }

 export interface ChatMessageMetaData {
-  model: "qwen2.5";
+  model: "qwen2.5" | "flux-schnell";
   temperature: number;
   maxTokens: number;
   topP: number;
@@ -326,10 +328,10 @@ export interface ChatMessageRagSearch {
   id?: string;
   sessionId: string;
   senderId?: string;
-  status: "done";
-  type: "rag_result";
-  sender: "user";
-  timestamp: Date;
+  status: "initializing" | "streaming" | "status" | "done" | "error";
+  type: "error" | "generating" | "info" | "preparing" | "processing" | "heartbeat" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";
+  sender: "user" | "assistant" | "agent" | "system";
+  timestamp?: Date;
   tunables?: Tunables;
   content: string;
   dimensions: number;
@@ -339,10 +341,10 @@ export interface ChatMessageUser {
   id?: string;
   sessionId: string;
   senderId?: string;
-  status: "done";
-  type: "user";
-  sender: "user";
-  timestamp: Date;
+  status: "initializing" | "streaming" | "status" | "done" | "error";
+  type: "error" | "generating" | "info" | "preparing" | "processing" | "heartbeat" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";
+  sender: "user" | "assistant" | "agent" | "system";
+  timestamp?: Date;
   tunables?: Tunables;
   content: string;
 }
@@ -386,6 +388,12 @@ export interface ChromaDBGetResponse {
   umapEmbedding3D?: Array<number>;
 }

+export interface Citation {
+  text: string;
+  source: string;
+  relevance: number;
+}
+
 export interface CreateCandidateRequest {
   email: string;
   username: string;
@@ -613,6 +621,16 @@ export interface JobListResponse {
   meta?: Record<string, any>;
 }

+export interface JobRequirements {
+  technicalSkills: Requirements;
+  experienceRequirements: Requirements;
+  softSkills?: Array<string>;
+  experience?: Array<string>;
+  education?: Array<string>;
+  certifications?: Array<string>;
+  preferredAttributes?: Array<string>;
+}
+
 export interface JobResponse {
   success: boolean;
   data?: Job;
@@ -765,6 +783,11 @@ export interface RefreshToken {
   revokedReason?: string;
 }

+export interface Requirements {
+  required?: Array<string>;
+  preferred?: Array<string>;
+}
+
 export interface ResendVerificationRequest {
   email: string;
 }
@@ -810,6 +833,14 @@ export interface SkillAssessment {
   comments?: string;
 }

+export interface SkillMatch {
+  requirement: string;
+  status: "pending" | "complete" | "error";
+  matchScore: number;
+  assessment: string;
+  citations?: Array<Citation>;
+}
+
 export interface SocialLink {
   platform: "linkedin" | "twitter" | "github" | "dribbble" | "behance" | "website" | "other";
   url: string;
@@ -1017,7 +1048,7 @@ export function convertChatMessageFromApi(data: any): ChatMessage {
   return {
     ...data,
     // Convert timestamp from ISO string to Date
-    timestamp: new Date(data.timestamp),
+    timestamp: data.timestamp ? new Date(data.timestamp) : undefined,
   };
 }
 /**
@@ -1030,7 +1061,7 @@ export function convertChatMessageBaseFromApi(data: any): ChatMessageBase {
   return {
     ...data,
     // Convert timestamp from ISO string to Date
-    timestamp: new Date(data.timestamp),
+    timestamp: data.timestamp ? new Date(data.timestamp) : undefined,
   };
 }
 /**
@@ -1043,7 +1074,7 @@ export function convertChatMessageRagSearchFromApi(data: any): ChatMessageRagSea
   return {
     ...data,
     // Convert timestamp from ISO string to Date
-    timestamp: new Date(data.timestamp),
+    timestamp: data.timestamp ? new Date(data.timestamp) : undefined,
   };
 }
 /**
@@ -1056,7 +1087,7 @@ export function convertChatMessageUserFromApi(data: any): ChatMessageUser {
   return {
     ...data,
     // Convert timestamp from ISO string to Date
-    timestamp: new Date(data.timestamp),
+    timestamp: data.timestamp ? new Date(data.timestamp) : undefined,
   };
 }
 /**
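The regenerated converters make `timestamp` optional end to end: passing `undefined` to `new Date()` silently produces an Invalid Date, so the ternary keeps absent timestamps absent instead. A minimal sketch of the pattern (the `raw` payload is hypothetical):

```typescript
interface Stamped { timestamp?: Date }

// Convert an ISO string to a Date only when the field is present on the wire.
function stampFromApi(data: { timestamp?: string }): Stamped {
  return { timestamp: data.timestamp ? new Date(data.timestamp) : undefined };
}

const raw = { timestamp: '2025-06-04T03:59:11.250Z' };
console.log(stampFromApi(raw).timestamp?.toISOString()); // 2025-06-04T03:59:11.250Z
console.log(stampFromApi({}).timestamp);                 // undefined
```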
@@ -17,6 +17,7 @@ from typing import (
 import json
 import time
 import inspect
+import re
 from abc import ABC
 import asyncio
 from datetime import datetime, UTC
@@ -79,6 +80,12 @@ class Agent(BaseModel, ABC):
     def context_size(self, value: int):
         Agent._context_size = value

+    async def get_last_item(self, generator):
+        last_item = None
+        async for item in generator:
+            last_item = item
+        return last_item
+
     def set_optimal_context_size(
         self, llm: Any, model: str, prompt: str, ctx_buffer=2048
     ) -> int:
@@ -297,7 +304,7 @@ class Agent(BaseModel, ABC):
         self,
         chat_message: ChatMessage,
         top_k: int=defines.default_rag_top_k,
-        threshold: float=defines.default_rag_threshold
+        threshold: float=defines.default_rag_threshold,
     ) -> AsyncGenerator[ChatMessage, None]:
         """
         Generate RAG results for the given query.
@@ -320,24 +327,29 @@ class Agent(BaseModel, ABC):
         )

         if not self.user:
+            logger.error("No user set for RAG generation")
             rag_message.status = ChatStatusType.DONE
-            rag_message.content = "No user connected to this chat, so no RAG content."
+            rag_message.content = ""
             yield rag_message
             return

         try:
             entries: int = 0
             user: Candidate = self.user
+            rag_content: str = ""
             for rag in user.rags:
                 if not rag.enabled:
                     continue
-                rag_message.type = ChatMessageType.SEARCHING
-                rag_message.status = ChatStatusType.INITIALIZING
-                rag_message.content = f"Checking RAG context {rag.name}..."
-                yield rag_message
+                status_message = ChatMessage(
+                    session_id=chat_message.session_id,
+                    sender=ChatSenderType.AGENT,
+                    status = ChatStatusType.INITIALIZING,
+                    type = ChatMessageType.SEARCHING,
+                    content = f"Checking RAG context {rag.name}...")
+                yield status_message

                 chroma_results = user.file_watcher.find_similar(
-                    query=rag_message.content, top_k=top_k, threshold=threshold
+                    query=chat_message.content, top_k=top_k, threshold=threshold
                 )
                 if chroma_results:
                     query_embedding = np.array(chroma_results["query_embedding"]).flatten()
@@ -360,15 +372,26 @@ class Agent(BaseModel, ABC):

                     entries += len(rag_metadata.documents)
                     rag_message.metadata.rag_results.append(rag_metadata)
-                    rag_message.content = f"Results from {rag.name} RAG: {len(rag_metadata.documents)} results."
-                    yield rag_message

-            rag_message.content = (
-                f"RAG context gathered from results from {entries} documents."
-            )
+                    for index, metadata in enumerate(chroma_results["metadatas"]):
+                        content = "\n".join(
+                            [
+                                line.strip()
+                                for line in chroma_results["documents"][index].split("\n")
+                                if line
+                            ]
+                        ).strip()
+                        rag_content += f"""
+Source: {metadata.get("doc_type", "unknown")}: {metadata.get("path", "")}
+Document reference: {chroma_results["ids"][index]}
+Content: { content }
+"""
+            rag_message.content = rag_content.strip()
+            rag_message.type = ChatMessageType.RAG_RESULT
             rag_message.status = ChatStatusType.DONE
             yield rag_message
             return

         except Exception as e:
             rag_message.status = ChatStatusType.ERROR
             rag_message.content = f"Error generating RAG results: {str(e)}"
@@ -377,6 +400,80 @@ class Agent(BaseModel, ABC):
             yield rag_message
             return

+    async def llm_one_shot(self, llm: Any, model: str, user_message: ChatMessageUser, system_prompt: str, temperature=0.7):
+        chat_message = ChatMessage(
+            session_id=user_message.session_id,
+            tunables=user_message.tunables,
+            status=ChatStatusType.INITIALIZING,
+            type=ChatMessageType.PREPARING,
+            sender=ChatSenderType.AGENT,
+            content="",
+            timestamp=datetime.now(UTC)
+        )
+
+        self.set_optimal_context_size(
+            llm, model, prompt=chat_message.content
+        )
+
+        chat_message.metadata = ChatMessageMetaData()
+        chat_message.metadata.options = ChatOptions(
+            seed=8911,
+            num_ctx=self.context_size,
+            temperature=temperature, # Higher temperature to encourage tool usage
+        )
+
+        messages: List[LLMMessage] = [
+            LLMMessage(role="system", content=system_prompt),
+            LLMMessage(role="user", content=user_message.content),
+        ]
+
+        # Reset the response for streaming
+        chat_message.content = ""
+        chat_message.type = ChatMessageType.GENERATING
+        chat_message.status = ChatStatusType.STREAMING
+
+        logger.info(f"Message options: {chat_message.metadata.options.model_dump(exclude_unset=True)}")
+        response = None
+        for response in llm.chat(
+            model=model,
+            messages=messages,
+            options={
+                **chat_message.metadata.options.model_dump(exclude_unset=True),
+            },
+            stream=True,
+        ):
+            if not response:
+                chat_message.status = ChatStatusType.ERROR
+                chat_message.content = "No response from LLM."
+                yield chat_message
+                return
+
+            chat_message.content += response.message.content
+
+            if not response.done:
+                chat_chunk = model_cast.cast_to_model(ChatMessageBase, chat_message)
+                chat_chunk.content = response.message.content
+                yield chat_message
+                continue
+
+        if not response:
+            chat_message.status = ChatStatusType.ERROR
+            chat_message.content = "No response from LLM."
+            yield chat_message
+            return
+
+        self.collect_metrics(response)
+        chat_message.metadata.eval_count += response.eval_count
+        chat_message.metadata.eval_duration += response.eval_duration
+        chat_message.metadata.prompt_eval_count += response.prompt_eval_count
+        chat_message.metadata.prompt_eval_duration += response.prompt_eval_duration
+        self.context_tokens = (
+            response.prompt_eval_count + response.eval_count
+        )
+        chat_message.type = ChatMessageType.RESPONSE
+        chat_message.status = ChatStatusType.DONE
+        yield chat_message
+
     async def generate(
         self, llm: Any, model: str, user_message: ChatMessageUser, user: Candidate | None, temperature=0.7
     ) -> AsyncGenerator[ChatMessage | ChatMessageBase, None]:
@@ -392,6 +489,10 @@ class Agent(BaseModel, ABC):
             timestamp=datetime.now(UTC)
         )

+        self.set_optimal_context_size(
+            llm, model, prompt=chat_message.content
+        )
+
         chat_message.metadata = ChatMessageMetaData()
         chat_message.metadata.options = ChatOptions(
             seed=8911,
@@ -679,6 +780,20 @@ Content: { content }

         # return

+    def extract_json_from_text(self, text: str) -> str:
+        """Extract JSON string from text that may contain other content."""
+        json_pattern = r"```json\s*([\s\S]*?)\s*```"
+        match = re.search(json_pattern, text)
+        if match:
+            return match.group(1).strip()
+
+        # Try to find JSON without the markdown code block
+        json_pattern = r"({[\s\S]*})"
+        match = re.search(json_pattern, text)
+        if match:
+            return match.group(1).strip()
+
+        raise ValueError("No JSON found in the response")
+
 # Register the base agent
 agent_registry.register(Agent._agent_type, Agent)
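`extract_json_from_text` is what lets the new `JobRequirementsAgent` (added at the end of this compare) recover JSON whether or not the model wrapped it in a fenced block. A TypeScript analogue for clients that need the same tolerance might look like this; it mirrors the Python regexes and is a sketch, not committed code:

```typescript
// Prefer a fenced ```json block; fall back to the outermost brace pair.
function extractJsonFromText(text: string): string {
  const fenced = text.match(/```json\s*([\s\S]*?)\s*```/);
  if (fenced) return fenced[1].trim();
  const bare = text.match(/({[\s\S]*})/);
  if (bare) return bare[1].trim();
  throw new Error('No JSON found in the response');
}
```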
@@ -315,69 +315,6 @@ class GeneratePersona(Agent):
         self.first_name, self.last_name, self.ethnicity, self.gender = self.generator.generate_random_name()
         self.full_name = f"{self.first_name} {self.last_name}"

-    async def call_llm(self, llm: Any, model: str, user_message: ChatMessageUser, system_prompt: str, temperature=0.7):
-        chat_message = ChatMessage(
-            session_id=user_message.session_id,
-            tunables=user_message.tunables,
-            status=ChatStatusType.INITIALIZING,
-            type=ChatMessageType.PREPARING,
-            sender=ChatSenderType.ASSISTANT,
-            content="",
-            timestamp=datetime.now(UTC)
-        )
-
-        chat_message.metadata = ChatMessageMetaData()
-        chat_message.metadata.options = ChatOptions(
-            seed=8911,
-            num_ctx=self.context_size,
-            temperature=temperature, # Higher temperature to encourage tool usage
-        )
-
-        messages: List[LLMMessage] = [
-            LLMMessage(role="system", content=system_prompt),
-            LLMMessage(role="user", content=user_message.content),
-        ]
-
-        # Reset the response for streaming
-        chat_message.content = ""
-        chat_message.type = ChatMessageType.GENERATING
-        chat_message.status = ChatStatusType.STREAMING
-
-        for response in llm.chat(
-            model=model,
-            messages=messages,
-            options={
-                **chat_message.metadata.options.model_dump(exclude_unset=True),
-            },
-            stream=True,
-        ):
-            if not response:
-                chat_message.status = ChatStatusType.ERROR
-                chat_message.content = "No response from LLM."
-                yield chat_message
-                return
-
-            chat_message.content += response.message.content
-
-            if not response.done:
-                chat_chunk = model_cast.cast_to_model(ChatMessageBase, chat_message)
-                chat_chunk.content = response.message.content
-                yield chat_message
-                continue
-
-            if response.done:
-                self.collect_metrics(response)
-                chat_message.metadata.eval_count += response.eval_count
-                chat_message.metadata.eval_duration += response.eval_duration
-                chat_message.metadata.prompt_eval_count += response.prompt_eval_count
-                chat_message.metadata.prompt_eval_duration += response.prompt_eval_duration
-                self.context_tokens = (
-                    response.prompt_eval_count + response.eval_count
-                )
-                chat_message.type = ChatMessageType.RESPONSE
-                chat_message.status = ChatStatusType.DONE
-                yield chat_message
-
     async def generate(
         self, llm: Any, model: str, user_message: ChatMessageUser, user: Candidate, temperature=0.7
     ):
@@ -409,7 +346,7 @@ Incorporate the following into the job description: {original_prompt}
         #
         logger.info(f"🤖 Generating persona for {self.full_name}")
         generating_message = None
-        async for generating_message in self.call_llm(
+        async for generating_message in self.llm_one_shot(
             llm=llm, model=model,
             user_message=user_message,
             system_prompt=generate_persona_system_prompt,
@@ -515,7 +452,7 @@ Incorporate the following into the job description: {original_prompt}
         user_message.content += f"""
 Make sure at least one of the candidate's job descriptions take into account the following: {original_prompt}."""

-        async for generating_message in self.call_llm(
+        async for generating_message in self.llm_one_shot(
             llm=llm, model=model,
             user_message=user_message,
             system_prompt=generate_resume_system_prompt,
|
170
src/backend/agents/job_requirements.py
Normal file
170
src/backend/agents/job_requirements.py
Normal file
@ -0,0 +1,170 @@
from __future__ import annotations
from pydantic import model_validator, Field # type: ignore
from typing import (
    Dict,
    Literal,
    ClassVar,
    Any,
    AsyncGenerator,
    List,
    Optional
    # override
) # NOTE: You must import Optional for late binding to work
import inspect
import re
import json
import traceback
import asyncio
import time
import numpy as np # type: ignore

from .base import Agent, agent_registry, LLMMessage
from models import Candidate, ChatMessage, ChatMessageBase, ChatMessageMetaData, ChatMessageType, ChatMessageUser, ChatOptions, ChatSenderType, ChatStatusType, JobRequirements
import model_cast
from logger import logger
import defines

class JobRequirementsAgent(Agent):
    agent_type: Literal["job_requirements"] = "job_requirements" # type: ignore
    _agent_type: ClassVar[str] = agent_type # Add this for registration

    # Stage 1A: Job Analysis Implementation
    def create_job_analysis_prompt(self, job_description: str) -> tuple[str, str]:
        """Create the system and user prompts for job requirements analysis."""
        logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
        system_prompt = """
You are an objective job requirements analyzer. Your task is to extract and categorize the specific skills,
experiences, and qualifications required in a job description WITHOUT any reference to any candidate.

## INSTRUCTIONS:

1. Analyze ONLY the job description provided.
2. Extract and categorize all requirements and preferences.
3. DO NOT consider any candidate information - this is a pure job analysis task.

## OUTPUT FORMAT:

```json
{
  "job_requirements": {
    "technical_skills": {
      "required": ["skill1", "skill2"],
      "preferred": ["skill1", "skill2"]
    },
    "experience_requirements": {
      "required": ["exp1", "exp2"],
      "preferred": ["exp1", "exp2"]
    },
    "education_requirements": ["req1", "req2"],
    "soft_skills": ["skill1", "skill2"],
    "industry_knowledge": ["knowledge1", "knowledge2"],
    "responsibilities": ["resp1", "resp2"],
    "company_values": ["value1", "value2"]
  }
}
```

Be specific and detailed in your extraction. Break down compound requirements into individual components.
For example, "5+ years experience with React, Node.js and MongoDB" should be separated into:
- Experience: "5+ years software development"
- Technical skills: "React", "Node.js", "MongoDB"

Avoid vague categorizations and be precise about whether skills are explicitly required or just preferred.
"""

        prompt = f"Job Description:\n{job_description}"
        return system_prompt, prompt

    async def analyze_job_requirements(
        self, llm: Any, model: str, user_message: ChatMessage
    ) -> AsyncGenerator[ChatMessage, None]:
        """Analyze job requirements from a job description."""
        system_prompt, prompt = self.create_job_analysis_prompt(user_message.content)
        analyze_message = user_message.model_copy()
        analyze_message.content = prompt
        generated_message = None
        async for generated_message in self.llm_one_shot(llm, model, system_prompt=system_prompt, user_message=analyze_message):
            if generated_message.status == ChatStatusType.ERROR:
                generated_message.content = "Error analyzing job requirements."
                yield generated_message
                return

        if not generated_message:
            status_message = ChatMessage(
                session_id=user_message.session_id,
                sender=ChatSenderType.AGENT,
                status=ChatStatusType.ERROR,
                type=ChatMessageType.ERROR,
                content="Job requirements analysis failed to generate a response.")
            yield status_message
            return

        generated_message.status = ChatStatusType.DONE
        generated_message.type = ChatMessageType.RESPONSE
        yield generated_message
        return

    async def generate(
        self, llm: Any, model: str, user_message: ChatMessageUser, user: Candidate | None, temperature=0.7
    ) -> AsyncGenerator[ChatMessage, None]:
        # Stage 1A: Analyze job requirements
        status_message = ChatMessage(
            session_id=user_message.session_id,
            sender=ChatSenderType.AGENT,
            status=ChatStatusType.STATUS,
            type=ChatMessageType.THINKING,
            content="Analyzing job requirements")
        yield status_message

        generated_message = None
        async for generated_message in self.analyze_job_requirements(llm, model, user_message):
            if generated_message.status == ChatStatusType.ERROR:
                status_message.status = ChatStatusType.ERROR
                status_message.content = generated_message.content
                yield status_message
                return
        if not generated_message:
            status_message.status = ChatStatusType.ERROR
            status_message.content = "Job requirements analysis failed."
            yield status_message
            return

        json_str = self.extract_json_from_text(generated_message.content)
        job_requirements: JobRequirements | None = None
        job_requirements_data = ""
        try:
            job_requirements_data = json.loads(json_str)
            job_requirements_data = job_requirements_data.get("job_requirements", None)
            job_requirements = JobRequirements.model_validate(job_requirements_data)
            if not job_requirements:
                raise ValueError("Job requirements data is empty or invalid.")
        except json.JSONDecodeError as e:
            status_message.status = ChatStatusType.ERROR
            status_message.content = f"Failed to parse job requirements JSON: {str(e)}\n\n{job_requirements_data}"
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return
        except ValueError as e:
            status_message.status = ChatStatusType.ERROR
            status_message.content = f"Job requirements validation error: {str(e)}\n\n{job_requirements_data}"
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return
        except Exception as e:
            status_message.status = ChatStatusType.ERROR
            status_message.content = f"Unexpected error processing job requirements: {str(e)}\n\n{job_requirements_data}"
            logger.error(traceback.format_exc())
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return
        status_message.status = ChatStatusType.DONE
        status_message.type = ChatMessageType.RESPONSE
        status_message.content = json.dumps(job_requirements.model_dump(mode="json", exclude_unset=True))
        yield status_message

        logger.info("✅ Job requirements analysis completed successfully.")
        return

# Register the base agent
agent_registry.register(JobRequirementsAgent._agent_type, JobRequirementsAgent)
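
For reviewers, a minimal sketch of how this agent's generator is expected to be consumed; the llm handle, model name, and message wiring here are illustrative assumptions, not part of this diff:

import json
from models import ChatStatusType

async def run_job_requirements(agent, llm, model, user_message):
    # Drain the async generator; the final DONE/RESPONSE message carries the
    # JobRequirements JSON that generate() serializes with json.dumps().
    final = None
    async for message in agent.generate(llm=llm, model=model, user_message=user_message, user=None):
        final = message
    if final is None or final.status != ChatStatusType.DONE:
        raise RuntimeError(f"job requirements analysis failed: {final.content if final else 'no output'}")
    return json.loads(final.content)  # dict matching the job_requirements schema above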
src/backend/agents/skill_match.py (Normal file, 233 lines added)
@@ -0,0 +1,233 @@
from __future__ import annotations
from pydantic import model_validator, Field # type: ignore
from typing import (
    Dict,
    Literal,
    ClassVar,
    Any,
    AsyncGenerator,
    List,
    Optional
    # override
) # NOTE: You must import Optional for late binding to work
import inspect
import re
import json
import traceback
import asyncio
import time
import numpy as np # type: ignore

from .base import Agent, agent_registry, LLMMessage
from models import Candidate, ChatMessage, ChatMessageBase, ChatMessageMetaData, ChatMessageType, ChatMessageUser, ChatOptions, ChatSenderType, ChatStatusType, SkillMatch
import model_cast
from logger import logger
import defines

class SkillMatchAgent(Agent):
    agent_type: Literal["skill_match"] = "skill_match" # type: ignore
    _agent_type: ClassVar[str] = agent_type # Add this for registration

    def generate_skill_assessment_prompt(self, skill, rag_content):
        """
        Generate the prompts to query the LLM for evidence of a specific skill.

        Parameters:
        - skill (str): The specific skill to assess from job requirements
        - rag_content (str): Additional RAG content queried from candidate documents

        Returns:
        - tuple[str, str]: The system prompt tailored to assess the specific skill, and the user prompt carrying the candidate info
        """

        system_prompt = f"""You are an objective skill assessor. Your task is to determine if a candidate possesses
a SPECIFIC skill based solely on their resume and supporting evidence.

## SKILL TO ASSESS:
"{skill}"

## INSTRUCTIONS:
1. Focus exclusively on assessing the candidate's proficiency with the skill: "{skill}".
2. Examine the resume and supporting documents for both explicit mentions and clearly demonstrated applications of the skill.
3. Do NOT infer the skill unless it is either:
   - Directly mentioned in association with experience, or
   - Clearly evidenced through relevant tools, technologies, responsibilities, or outcomes, or
   - Referenced as being used or listed as part of a broader initiative.
4. Evaluate each piece of evidence and assign a confidence rating:
   - STRONG: Explicit use with clear context or repeated/prolonged experience
   - MODERATE: Inferred through tools, environments, or outcomes (e.g., Python used in a listed project/tool)
   - WEAK: Mentioned in a list or indirectly implied without context
   - NONE: No relevant evidence
5. Provide up to 10 evidence_details. Each should include:
   - source: where the evidence appears
   - quote: an exact snippet from the document(s)
   - context: a short rationale explaining how this supports the skill claim
6. When no supporting evidence exists, output a "NONE" rating with an empty evidence details list.
7. IMPORTANT: Even if the skill is only listed in a technologies or languages section, consider this valid evidence.

## OUTPUT FORMAT:
```json
{{
  "skill_assessment": {{
    "skill": "{skill}",
    "evidence_found": true/false,
    "evidence_strength": "STRONG/MODERATE/WEAK/NONE",
    "description": "short (two to three sentence) description of what {skill} means with a concise example of what you're looking for",
    "evidence_details": [
      {{
        "source": "resume section/position/project",
        "quote": "exact text from resume showing evidence",
        "context": "brief explanation of how this demonstrates the skill"
      }}
    ]
  }}
}}
```

IMPORTANT: Be factual and precise. If you cannot find strong evidence for this specific skill, it's better to indicate "evidence_strength": WEAK than to stretch for connections.
Focus only on "{skill}" and not similar skills unless they directly demonstrate the required skill.
Remember that a skill listed in a "Languages" or "Technologies" section should be considered valid evidence.

Adhere strictly to the JSON output format requested. Do not include any additional text or commentary outside the JSON structure.
"""

        prompt = f"""Analyze the candidate information below for evidence of "{skill}".

RESPOND WITH ONLY VALID JSON USING THE EXACT FORMAT SPECIFIED.

<candidate_info>
{rag_content}
</candidate_info>

JSON RESPONSE:"""

        return system_prompt, prompt

    async def analyze_job_requirements(
        self, llm: Any, model: str, user_message: ChatMessage
    ) -> AsyncGenerator[ChatMessage, None]:
        """Analyze job requirements from a job description."""
        # NOTE: mirrors JobRequirementsAgent.analyze_job_requirements;
        # create_job_analysis_prompt is not defined on this class, so this
        # path will raise AttributeError if invoked as-is.
        system_prompt, prompt = self.create_job_analysis_prompt(user_message.content)
        analyze_message = user_message.model_copy()
        analyze_message.content = prompt
        generated_message = None
        async for generated_message in self.llm_one_shot(llm, model, system_prompt=system_prompt, user_message=analyze_message):
            if generated_message.status == ChatStatusType.ERROR:
                generated_message.content = "Error analyzing job requirements."
                yield generated_message
                return

        if not generated_message:
            status_message = ChatMessage(
                session_id=user_message.session_id,
                sender=ChatSenderType.AGENT,
                status=ChatStatusType.ERROR,
                type=ChatMessageType.ERROR,
                content="Job requirements analysis failed to generate a response.")
            yield status_message
            return

        generated_message.status = ChatStatusType.DONE
        generated_message.type = ChatMessageType.RESPONSE
        yield generated_message
        return

    async def generate(
        self, llm: Any, model: str, user_message: ChatMessageUser, user: Candidate | None, temperature=0.7
    ) -> AsyncGenerator[ChatMessage, None]:
        # Stage 1A: Analyze job requirements
        status_message = ChatMessage(
            session_id=user_message.session_id,
            sender=ChatSenderType.AGENT,
            status=ChatStatusType.STATUS,
            type=ChatMessageType.THINKING,
            content="Analyzing job requirements")
        yield status_message

        rag_message = None
        async for rag_message in self.generate_rag_results(chat_message=user_message):
            if rag_message.status == ChatStatusType.ERROR:
                status_message.status = ChatStatusType.ERROR
                status_message.content = rag_message.content
                logger.error(f"⚠️ {status_message.content}")
                yield status_message
                return

        if rag_message is None:
            status_message.status = ChatStatusType.ERROR
            status_message.content = "Failed to retrieve RAG context."
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return

        logger.info(f"🔍 RAG content retrieved: {len(rag_message.content)} characters")

        system_prompt, prompt = self.generate_skill_assessment_prompt(skill=user_message.content, rag_content=rag_message.content)

        user_message.content = prompt
        skill_assessment = None
        async for skill_assessment in self.llm_one_shot(llm=llm, model=model, user_message=user_message, system_prompt=system_prompt, temperature=0.1):
            if skill_assessment.status == ChatStatusType.ERROR:
                status_message.status = ChatStatusType.ERROR
                status_message.content = skill_assessment.content
                logger.error(f"⚠️ {status_message.content}")
                yield status_message
                return
        if skill_assessment is None:
            status_message.status = ChatStatusType.ERROR
            status_message.content = "Failed to generate skill assessment."
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return

        json_str = self.extract_json_from_text(skill_assessment.content)
        skill_match = json_str  # raw JSON string; not yet validated against the SkillMatch model
        skill_assessment_data = ""
        try:
            skill_assessment_data = json.loads(json_str)
            match_level = (
                skill_assessment_data
                .get("skill_assessment", {})
                .get("evidence_strength", "UNKNOWN")
            )
            skill_description = (
                skill_assessment_data
                .get("skill_assessment", {})
                .get("description", "")
            )
        except json.JSONDecodeError as e:
            status_message.status = ChatStatusType.ERROR
            status_message.content = f"Failed to parse skill assessment JSON: {str(e)}\n\n{skill_assessment_data}"
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return
        except ValueError as e:
            status_message.status = ChatStatusType.ERROR
            status_message.content = f"Skill assessment validation error: {str(e)}\n\n{skill_assessment_data}"
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return
        except Exception as e:
            status_message.status = ChatStatusType.ERROR
            status_message.content = f"Unexpected error processing skill assessment: {str(e)}\n\n{skill_assessment_data}"
            logger.error(traceback.format_exc())
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return
        if skill_match is None:
            status_message.status = ChatStatusType.ERROR
            status_message.content = "Skill assessment analysis failed to produce valid data."
            logger.error(f"⚠️ {status_message.content}")
            yield status_message
            return
        status_message.status = ChatStatusType.DONE
        status_message.type = ChatMessageType.RESPONSE
        status_message.content = skill_match
        yield status_message

        logger.info("✅ Skill assessment completed successfully.")
        return

# Register the base agent
agent_registry.register(SkillMatchAgent._agent_type, SkillMatchAgent)
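
A minimal sketch of how the emitted skill_assessment JSON can be post-processed downstream; the strength-to-score mapping is an assumption for illustration, not something this diff defines:

import json

STRENGTH_SCORES = {"STRONG": 90, "MODERATE": 60, "WEAK": 30, "NONE": 0}  # hypothetical 0-100 scale

def summarize_skill_assessment(raw_json: str) -> dict:
    # Pull the fields the agent's prompt guarantees and flatten them into
    # something shaped like the frontend's SkillMatch.
    data = json.loads(raw_json).get("skill_assessment", {})
    strength = data.get("evidence_strength", "NONE")
    return {
        "requirement": data.get("skill", ""),
        "matchScore": STRENGTH_SCORES.get(strength, 0),
        "assessment": data.get("description", ""),
        "citations": [d.get("quote", "") for d in data.get("evidence_details", [])],
    }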
@@ -198,13 +198,14 @@ def python_type_to_typescript(python_type: Any, field_info: Any = None, debug: b
     if debug and original_type != python_type:
         print(f"   🔄 Unwrapped: {original_type} -> {python_type}")

-    # Check if this field has a specific enum default value
+    # FIXED: Don't lock enum types to their default values
+    # Instead, always return the full enum type
     if field_info:
         default_enum = get_default_enum_value(field_info, debug)
         if default_enum is not None:
             if debug:
-                print(f"   🎯 Field has specific enum default: {default_enum.value}")
+                print(f"   🎯 Field has specific enum default: {default_enum.value}, but returning full enum type")
-            return f'"{default_enum.value}"'
+            # Don't return just the default value - continue to process the full enum type

     # Handle None/null
     if python_type is type(None):
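
The change above stops narrowing an enum-typed field to its default literal. A minimal sketch of the corrected rule, with hypothetical helper names (not the generator's actual functions):

from enum import Enum

class ChatStatusType(str, Enum):  # toy stand-in for the real model enum
    DONE = "done"
    ERROR = "error"

def typescript_type_for_enum(enum_cls: type[Enum], default: Enum | None) -> str:
    # Old behavior returned f'"{default.value}"' when a default existed,
    # locking the generated TS field to a single literal. The fixed behavior
    # always emits the union of all enum values, regardless of default.
    return " | ".join(f'"{member.value}"' for member in enum_cls)

assert typescript_type_for_enum(ChatStatusType, ChatStatusType.DONE) == '"done" | "error"'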
@@ -780,7 +781,7 @@ def compile_typescript(ts_file: str) -> bool:
 def main():
     """Main function with command line argument parsing"""
     parser = argparse.ArgumentParser(
-        description='Generate TypeScript types from Pydantic models with date conversion functions and proper enum default handling',
+        description='Generate TypeScript types from Pydantic models with date conversion functions and proper enum handling',
         formatter_class=argparse.RawDescriptionHelpFormatter,
         epilog="""
 Examples:
@@ -795,8 +796,8 @@ Generated conversion functions can be used like:
   const candidate = convertCandidateFromApi(apiResponse);
   const jobs = convertArrayFromApi<Job>(apiResponse, 'Job');

-Enum defaults are now properly handled:
+Enum types are now properly handled:
-  status: ChatStatusType = ChatStatusType.DONE -> status: "done"
+  status: ChatStatusType = ChatStatusType.DONE -> status: ChatStatusType (not locked to "done")
         """
     )
@@ -833,12 +834,12 @@ Enum defaults are now properly handled:
     parser.add_argument(
         '--version', '-v',
         action='version',
-        version='TypeScript Generator 3.1 (with Enum Default Handling)'
+        version='TypeScript Generator 3.2 (Fixed Enum Default Handling)'
     )

     args = parser.parse_args()

-    print("🚀 Enhanced TypeScript Type Generator with Enum Default Handling")
+    print("🚀 Enhanced TypeScript Type Generator with Fixed Enum Handling")
     print("=" * 60)
     print(f"📁 Source file: {args.source}")
     print(f"📁 Output file: {args.output}")
@@ -883,12 +884,12 @@ Enum defaults are now properly handled:

     # Count conversion functions and provide detailed feedback
     conversion_count = ts_content.count('export function convert') - ts_content.count('convertFromApi') - ts_content.count('convertArrayFromApi')
-    enum_specific_count = ts_content.count(': "') - ts_content.count('export type')
+    enum_type_count = ts_content.count('export type')

     if conversion_count > 0:
         print(f"🗓️ Generated {conversion_count} date conversion functions")
-    if enum_specific_count > 0:
+    if enum_type_count > 0:
-        print(f"🎯 Generated {enum_specific_count} specific enum default types")
+        print(f"🎯 Generated {enum_type_count} enum types (properly allowing all values)")

     if args.debug:
         # Show which models have date conversion
@@ -903,7 +904,7 @@ Enum defaults are now properly handled:
     # Provide troubleshooting info if debug mode
     if args.debug:
         print(f"\n🐛 Debug mode was enabled. If you see incorrect type conversions:")
-        print(f"   1. Check the debug output above for '🎯 Field has specific enum default' lines")
+        print(f"   1. Check the debug output above for enum default handling")
         print(f"   2. Look for '📅 Date type check' lines for date handling")
         print(f"   3. Look for '⚠️' warnings about fallback types")
         print(f"   4. Verify your Pydantic model field types and defaults are correct")
@@ -924,8 +925,8 @@ Enum defaults are now properly handled:
     print(f"✅ File size: {file_size} characters")
     if conversion_count > 0:
         print(f"✅ Date conversion functions: {conversion_count}")
-    if enum_specific_count > 0:
+    if enum_type_count > 0:
-        print(f"✅ Specific enum default types: {enum_specific_count}")
+        print(f"✅ Enum types (with full value range): {enum_type_count}")
     if not args.skip_test:
         print("✅ Model validation passed")
     if not args.skip_compile:
@@ -279,6 +279,12 @@ async def get_database() -> RedisDatabase:
     """
     return db_manager.get_database()

+async def get_last_item(generator):
+    last_item = None
+    async for item in generator:
+        last_item = item
+    return last_item
+
 def create_success_response(data: Any, meta: Optional[Dict] = None) -> Dict:
     return {
         "success": True,
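
get_last_item is a small utility for endpoints that only care about an agent's final message. A quick usage sketch (the toy generator is illustrative):

import asyncio

async def numbers():
    for n in range(3):
        yield n

async def demo():
    last = await get_last_item(numbers())
    assert last == 2  # intermediate items are discarded

asyncio.run(demo())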
@@ -3050,7 +3056,7 @@ async def post_chat_session_message_stream(
             status_code=404,
             content=create_error_response("CANDIDATE_NOT_FOUND", "Candidate not found for this chat session")
         )
-    logger.info(f"🔗 User {current_user.id} posting message to chat session {user_message.session_id} with query: {user_message.content}")
+    logger.info(f"🔗 User {current_user.id} posting message to chat session {user_message.session_id} with query length: {len(user_message.content)}")

     async with entities.get_candidate_entity(candidate=candidate) as candidate_entity:
         # Entity automatically released when done
@@ -3343,6 +3349,68 @@ async def reset_chat_session(
             content=create_error_response("RESET_ERROR", str(e))
         )

+@api_router.post("/candidates/{candidate_id}/skill-match")
+async def get_candidate_skill_match(
+    candidate_id: str = Path(...),
+    requirement: str = Body(...),
+    current_user = Depends(get_current_user),
+    database: RedisDatabase = Depends(get_database)
+):
+    """Get skill match for a candidate against a requirement"""
+    try:
+        # Find candidate by ID
+        candidate_data = await database.get_candidate(candidate_id)
+        if not candidate_data:
+            return JSONResponse(
+                status_code=404,
+                content=create_error_response("CANDIDATE_NOT_FOUND", f"Candidate with ID '{candidate_id}' not found")
+            )
+
+        candidate = Candidate.model_validate(candidate_data)
+
+        logger.info(f"🔍 Running skill match for candidate {candidate.id} against requirement: {requirement}")
+        async with entities.get_candidate_entity(candidate=candidate) as candidate_entity:
+            agent = candidate_entity.get_or_create_agent(agent_type=ChatContextType.SKILL_MATCH)
+            if not agent:
+                return JSONResponse(
+                    status_code=400,
+                    content=create_error_response("AGENT_NOT_FOUND", "No skill match agent found for this candidate")
+                )
+            # Entity automatically released when done
+            skill_match = await get_last_item(
+                agent.generate(
+                    llm=llm_manager.get_llm(),
+                    model=defines.model,
+                    user_message=ChatMessageUser(
+                        sender_id=candidate.id,
+                        session_id="",
+                        content=requirement,
+                        timestamp=datetime.now(UTC)
+                    ),
+                    user=candidate,
+                )
+            )
+        if skill_match is None:
+            return JSONResponse(
+                status_code=500,
+                content=create_error_response("NO_MATCH", "No skill match found for the given requirement")
+            )
+        skill_match = skill_match.content.strip()
+        logger.info(f"✅ Skill match found for candidate {candidate.id}: {skill_match}")
+
+        return create_success_response({
+            "candidateId": candidate.id,
+            "skillMatch": skill_match
+        })
+
+    except Exception as e:
+        logger.error(traceback.format_exc())
+        logger.error(f"❌ Get candidate skill match error: {e}")
+        return JSONResponse(
+            status_code=500,
+            content=create_error_response("SKILL_MATCH_ERROR", str(e))
+        )
+
 @api_router.get("/candidates/{username}/chat-sessions")
 async def get_candidate_chat_sessions(
     username: str = Path(...),
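
A hedged sketch of calling the new endpoint from a client; the host, API prefix, and auth token are placeholders, and the body shape assumes FastAPI's embedded form for a scalar Body(...) parameter:

import requests

resp = requests.post(
    "http://localhost:8000/api/candidates/<candidate_id>/skill-match",  # prefix is an assumption
    json={"requirement": "5+ years Python"},
    headers={"Authorization": "Bearer <token>"},  # get_current_user implies auth is required
    timeout=120,
)
resp.raise_for_status()
print(resp.json())  # create_success_response wraps the candidateId/skillMatch payload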
@@ -71,8 +71,51 @@ class InterviewRecommendation(str, Enum):
 class ChatSenderType(str, Enum):
     USER = "user"
     ASSISTANT = "assistant"
+    AGENT = "agent"
     SYSTEM = "system"

+class Requirements(BaseModel):
+    required: List[str] = Field(default_factory=list)
+    preferred: List[str] = Field(default_factory=list)
+
+    @model_validator(mode='before')
+    def validate_requirements(cls, values):
+        if not isinstance(values, dict):
+            raise ValueError("Requirements must be a dictionary with 'required' and 'preferred' keys.")
+        return values
+
+class Citation(BaseModel):
+    text: str
+    source: str
+    relevance: int # 0-100 scale
+
+class SkillStatus(str, Enum):
+    PENDING = "pending"
+    COMPLETE = "complete"
+    ERROR = "error"
+
+class SkillMatch(BaseModel):
+    requirement: str
+    status: SkillStatus
+    match_score: int = Field(..., alias='matchScore')
+    assessment: str
+    citations: List[Citation] = Field(default_factory=list)
+    model_config = {
+        "populate_by_name": True # Allow both field names and aliases
+    }
+
+class JobRequirements(BaseModel):
+    technical_skills: Requirements = Field(..., alias="technicalSkills")
+    experience_requirements: Requirements = Field(..., alias="experienceRequirements")
+    soft_skills: Optional[List[str]] = Field(default_factory=list, alias="softSkills")
+    experience: Optional[List[str]] = []
+    education: Optional[List[str]] = []
+    certifications: Optional[List[str]] = []
+    preferred_attributes: Optional[List[str]] = Field(None, alias="preferredAttributes")
+    model_config = {
+        "populate_by_name": True # Allow both field names and aliases
+    }
+
 class ChatMessageType(str, Enum):
     ERROR = "error"
     GENERATING = "generating"
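
Because the new models set populate_by_name, they accept snake_case and camelCase interchangeably; a quick validation sketch (import path assumes the backend package context):

from models import SkillMatch

# camelCase, as sent by the TypeScript frontend
m1 = SkillMatch.model_validate({"requirement": "Python", "status": "pending", "matchScore": 75, "assessment": "ok"})
# snake_case, as used internally
m2 = SkillMatch.model_validate({"requirement": "Python", "status": "pending", "match_score": 75, "assessment": "ok"})
assert m1.match_score == m2.match_score == 75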
@@ -97,6 +140,7 @@ class ChatStatusType(str, Enum):

 class ChatContextType(str, Enum):
     JOB_SEARCH = "job_search"
+    JOB_REQUIREMENTS = "job_requirements"
     CANDIDATE_CHAT = "candidate_chat"
     INTERVIEW_PREP = "interview_prep"
     RESUME_REVIEW = "resume_review"
@@ -105,6 +149,7 @@ class ChatContextType(str, Enum):
     GENERATE_PROFILE = "generate_profile"
     GENERATE_IMAGE = "generate_image"
     RAG_SEARCH = "rag_search"
+    SKILL_MATCH = "skill_match"

 class AIModelType(str, Enum):
     QWEN2_5 = "qwen2.5"
@@ -709,20 +754,24 @@ class ChatOptions(BaseModel):
     seed: Optional[int] = 8911
     num_ctx: Optional[int] = Field(default=None, alias="numCtx") # Number of context tokens
     temperature: Optional[float] = Field(default=0.7) # Higher temperature to encourage tool usage
+    model_config = {
+        "populate_by_name": True # Allow both field names and aliases
+    }

 class LLMMessage(BaseModel):
     role: str = Field(default="")
     content: str = Field(default="")
-    tool_calls: Optional[List[Dict]] = Field(default={}, exclude=True)
+    tool_calls: Optional[List[Dict]] = Field(default=[], exclude=True)

 class ChatMessageBase(BaseModel):
     id: str = Field(default_factory=lambda: str(uuid.uuid4()))
     session_id: str = Field(..., alias="sessionId")
     sender_id: Optional[str] = Field(None, alias="senderId")
-    status: ChatStatusType = ChatStatusType.INITIALIZING
+    status: ChatStatusType #= ChatStatusType.INITIALIZING
-    type: ChatMessageType = ChatMessageType.PREPARING
+    type: ChatMessageType #= ChatMessageType.PREPARING
-    sender: ChatSenderType = ChatSenderType.SYSTEM
+    sender: ChatSenderType #= ChatSenderType.SYSTEM
     timestamp: datetime = Field(default_factory=lambda: datetime.now(UTC), alias="timestamp")
     tunables: Optional[Tunables] = None
     content: str = ""
@@ -758,8 +807,8 @@ class ChatMessageMetaData(BaseModel):
     }

 class ChatMessageUser(ChatMessageBase):
-    status: ChatStatusType = ChatStatusType.DONE
+    status: ChatStatusType = ChatStatusType.INITIALIZING
-    type: ChatMessageType = ChatMessageType.USER
+    type: ChatMessageType = ChatMessageType.GENERATING
     sender: ChatSenderType = ChatSenderType.USER

 class ChatMessage(ChatMessageBase):
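
The tool_calls fix matters because the old default ({}) did not match the declared List[Dict] type. A small check, assuming pydantic v2's per-instance copying of mutable defaults (so this is a typing fix rather than a shared-state fix):

from models import LLMMessage

a = LLMMessage(role="user", content="hi")
b = LLMMessage(role="user", content="ho")
a.tool_calls.append({"name": "search"})
assert b.tool_calls == []  # each instance gets its own copy of the default list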
@@ -473,6 +473,7 @@ class ChromaDBFileWatcher(FileSystemEventHandler):
             logging.error(chunk)

     def prepare_metadata(self, meta: Dict[str, Any], buffer=defines.chunk_buffer)-> str | None:
+        source_file = meta.get("source_file")
         try:
             source_file = meta["source_file"]
             path_parts = source_file.split(os.sep)
@@ -487,7 +488,7 @@ class ChromaDBFileWatcher(FileSystemEventHandler):
             meta["chunk_end"] = end
             return "".join(lines[start:end])
         except:
-            logging.warning(f"Unable to open {meta["source_file"]}")
+            logging.warning(f"Unable to open {source_file}")
             return None

 # Cosine Distance Equivalent Similarity Retrieval Characteristics
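
Hoisting source_file also removes a quoting hazard: the old f"... {meta["source_file"]}" nests double quotes inside a double-quoted f-string, a syntax error before Python 3.12. A minimal illustration of the corrected pattern (standalone sketch, not the watcher's actual code):

import logging

def read_chunk(meta: dict) -> str | None:
    source_file = meta.get("source_file")  # hoisted so the except handler can reference it safely
    try:
        with open(source_file) as f:
            return f.read()
    except Exception:
        logging.warning(f"Unable to open {source_file}")
        return None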