Skill tracking almost working
This commit is contained in:
parent
cb97cabfc3
commit
7586725f11
@ -20,75 +20,144 @@ import CheckCircleIcon from '@mui/icons-material/CheckCircle';
|
||||
import ErrorIcon from '@mui/icons-material/Error';
|
||||
import PendingIcon from '@mui/icons-material/Pending';
|
||||
import WarningIcon from '@mui/icons-material/Warning';
|
||||
import { Candidate, ChatMessage, ChatMessageBase, ChatMessageUser, ChatSession, JobRequirements, SkillMatch } from 'types/types';
|
||||
import { useAuth } from 'hooks/AuthContext';
|
||||
import { BackstoryPageProps } from './BackstoryTab';
|
||||
import { toCamelCase } from 'types/conversion';
|
||||
|
||||
// Define TypeScript interfaces for our data structures
|
||||
interface Citation {
|
||||
text: string;
|
||||
source: string;
|
||||
relevance: number; // 0-100 scale
|
||||
|
||||
interface Job {
|
||||
title: string;
|
||||
description: string;
|
||||
}
|
||||
|
||||
interface SkillMatch {
|
||||
requirement: string;
|
||||
status: 'pending' | 'complete' | 'error';
|
||||
matchScore: number; // 0-100 scale
|
||||
assessment: string;
|
||||
citations: Citation[];
|
||||
interface JobAnalysisProps extends BackstoryPageProps {
|
||||
job: Job;
|
||||
candidate: Candidate;
|
||||
}
|
||||
|
||||
interface JobAnalysisProps {
|
||||
jobTitle: string;
|
||||
candidateName: string;
|
||||
// This function would connect to your backend and return updates
|
||||
fetchRequirements: () => Promise<string[]>;
|
||||
// This function would fetch match data for a specific requirement
|
||||
fetchMatchForRequirement: (requirement: string) => Promise<SkillMatch>;
|
||||
}
|
||||
const defaultMessage: ChatMessageUser = {
|
||||
type: "preparing", status: "done", sender: "user", sessionId: "", timestamp: new Date(), content: ""
|
||||
};
|
||||
|
||||
const JobMatchAnalysis: React.FC<JobAnalysisProps> = ({
|
||||
jobTitle,
|
||||
candidateName,
|
||||
fetchRequirements,
|
||||
fetchMatchForRequirement
|
||||
}) => {
|
||||
const JobMatchAnalysis: React.FC<JobAnalysisProps> = (props: JobAnalysisProps) => {
|
||||
const {
|
||||
job,
|
||||
candidate,
|
||||
setSnack,
|
||||
} = props
|
||||
const { apiClient } = useAuth();
|
||||
const theme = useTheme();
|
||||
const [jobRequirements, setJobRequirements] = useState<JobRequirements | null>(null);
|
||||
const [requirements, setRequirements] = useState<string[]>([]);
|
||||
const [skillMatches, setSkillMatches] = useState<SkillMatch[]>([]);
|
||||
const [loadingRequirements, setLoadingRequirements] = useState<boolean>(true);
|
||||
const [creatingSession, setCreatingSession] = useState<boolean>(false);
|
||||
const [loadingRequirements, setLoadingRequirements] = useState<boolean>(false);
|
||||
const [expanded, setExpanded] = useState<string | false>(false);
|
||||
const [overallScore, setOverallScore] = useState<number>(0);
|
||||
const [requirementsSession, setRequirementsSession] = useState<ChatSession | null>(null);
|
||||
const [statusMessage, setStatusMessage] = useState<ChatMessage | null>(null);
|
||||
|
||||
// Handle accordion expansion
|
||||
const handleAccordionChange = (panel: string) => (event: React.SyntheticEvent, isExpanded: boolean) => {
|
||||
setExpanded(isExpanded ? panel : false);
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
if (requirementsSession || creatingSession) {
|
||||
return;
|
||||
}
|
||||
|
||||
const createSession = async () => {
|
||||
try {
|
||||
const session: ChatSession = await apiClient.createCandidateChatSession(
|
||||
candidate.username,
|
||||
'job_requirements',
|
||||
`Generate requirements for ${job.title}`
|
||||
);
|
||||
setSnack("Job analysis session started");
|
||||
setRequirementsSession(session);
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
setSnack("Unable to create requirements session", "error");
|
||||
}
|
||||
setCreatingSession(false);
|
||||
};
|
||||
setCreatingSession(true);
|
||||
createSession();
|
||||
}, [requirementsSession, apiClient, candidate]);
|
||||
|
||||
// Fetch initial requirements
|
||||
useEffect(() => {
|
||||
if (!job.description || !requirementsSession || loadingRequirements || jobRequirements) {
|
||||
return;
|
||||
}
|
||||
|
||||
const getRequirements = async () => {
|
||||
setLoadingRequirements(true);
|
||||
try {
|
||||
const fetchedRequirements = await fetchRequirements();
|
||||
setRequirements(fetchedRequirements);
|
||||
|
||||
const chatMessage: ChatMessageUser = { ...defaultMessage, sessionId: requirementsSession.id || '', content: job.description };
|
||||
apiClient.sendMessageStream(chatMessage, {
|
||||
onMessage: (msg: ChatMessage) => {
|
||||
console.log(`onMessage: ${msg.type}`, msg);
|
||||
if (msg.type === "response") {
|
||||
const incoming: any = toCamelCase<JobRequirements>(JSON.parse(msg.content || ''));
|
||||
const requirements: string[] = ['technicalSkills', 'experienceRequirements'].flatMap((type) => {
|
||||
return ['required', 'preferred'].flatMap((level) => {
|
||||
return incoming[type][level].map((s: string) => s);
|
||||
})
|
||||
});
|
||||
['softSkills', 'experience', 'education', 'certifications', 'preferredAttributes'].forEach(l => {
|
||||
if (incoming[l]) {
|
||||
incoming[l].forEach((s: string) => requirements.push(s));
|
||||
}
|
||||
});
|
||||
|
||||
// Initialize skill matches with pending status
|
||||
const initialSkillMatches = fetchedRequirements.map(req => ({
|
||||
requirement: req,
|
||||
status: 'pending' as const,
|
||||
matchScore: 0,
|
||||
assessment: '',
|
||||
citations: []
|
||||
}));
|
||||
|
||||
setSkillMatches(initialSkillMatches);
|
||||
setLoadingRequirements(false);
|
||||
const initialSkillMatches = requirements.map(req => ({
|
||||
requirement: req,
|
||||
status: 'pending' as const,
|
||||
matchScore: 0,
|
||||
assessment: '',
|
||||
citations: []
|
||||
}));
|
||||
|
||||
setRequirements(requirements);
|
||||
setSkillMatches(initialSkillMatches);
|
||||
setStatusMessage(null);
|
||||
setLoadingRequirements(false);
|
||||
}
|
||||
},
|
||||
onError: (error: string | ChatMessageBase) => {
|
||||
console.log("onError:", error);
|
||||
// Type-guard to determine if this is a ChatMessageBase or a string
|
||||
if (typeof error === "object" && error !== null && "content" in error) {
|
||||
setSnack(error.content || 'Error obtaining requirements from job description.', "error");
|
||||
} else {
|
||||
setSnack(error as string, "error");
|
||||
}
|
||||
setLoadingRequirements(false);
|
||||
},
|
||||
onStreaming: (chunk: ChatMessageBase) => {
|
||||
// console.log("onStreaming:", chunk);
|
||||
},
|
||||
onStatusChange: (status: string) => {
|
||||
console.log(`onStatusChange: ${status}`);
|
||||
},
|
||||
onComplete: () => {
|
||||
console.log("onComplete");
|
||||
setStatusMessage(null);
|
||||
setLoadingRequirements(false);
|
||||
}
|
||||
});
|
||||
} catch (error) {
|
||||
console.error("Error fetching requirements:", error);
|
||||
console.error('Failed to send message:', error);
|
||||
setLoadingRequirements(false);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
getRequirements();
|
||||
}, [fetchRequirements]);
|
||||
}, [job, requirementsSession]);
|
||||
|
||||
// Fetch match data for each requirement
|
||||
useEffect(() => {
|
||||
@ -98,14 +167,14 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = ({
|
||||
// Process requirements one by one
|
||||
for (let i = 0; i < requirements.length; i++) {
|
||||
try {
|
||||
const match = await fetchMatchForRequirement(requirements[i]);
|
||||
|
||||
const match: SkillMatch = await apiClient.candidateMatchForRequirement(candidate.id || '', requirements[i]);
|
||||
console.log(match);
|
||||
setSkillMatches(prev => {
|
||||
const updated = [...prev];
|
||||
updated[i] = match;
|
||||
return updated;
|
||||
});
|
||||
|
||||
|
||||
// Update overall score
|
||||
setSkillMatches(current => {
|
||||
const completedMatches = current.filter(match => match.status === 'complete');
|
||||
@ -133,7 +202,7 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = ({
|
||||
if (!loadingRequirements) {
|
||||
fetchMatchData();
|
||||
}
|
||||
}, [requirements, loadingRequirements, fetchMatchForRequirement]);
|
||||
}, [requirements, loadingRequirements]);
|
||||
|
||||
// Get color based on match score
|
||||
const getMatchColor = (score: number): string => {
|
||||
@ -165,13 +234,13 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = ({
|
||||
|
||||
<Grid size={{ xs: 12, md: 6 }}>
|
||||
<Typography variant="h6" component="h2">
|
||||
Job: {jobTitle}
|
||||
Job: {job.title}
|
||||
</Typography>
|
||||
</Grid>
|
||||
|
||||
<Grid size={{ xs: 12, md: 6 }}>
|
||||
<Typography variant="h6" component="h2">
|
||||
Candidate: {candidateName}
|
||||
Candidate: {candidate.fullName}
|
||||
</Typography>
|
||||
</Grid>
|
||||
|
||||
@ -329,7 +398,7 @@ const JobMatchAnalysis: React.FC<JobAnalysisProps> = ({
|
||||
Supporting Evidence:
|
||||
</Typography>
|
||||
|
||||
{match.citations.length > 0 ? (
|
||||
{match.citations && match.citations.length > 0 ? (
|
||||
match.citations.map((citation, citIndex) => (
|
||||
<Card
|
||||
key={citIndex}
|
||||
|
@ -1,153 +0,0 @@
|
||||
import React from 'react';
|
||||
import { JobMatchAnalysis } from '../components/JobMatchAnalysis';
|
||||
|
||||
// Mock data and functions to simulate your backend
|
||||
const mockRequirements = [
|
||||
"5+ years of React development experience",
|
||||
"Strong TypeScript skills",
|
||||
"Experience with RESTful APIs",
|
||||
"Knowledge of state management solutions (Redux, Context API)",
|
||||
"Experience with CI/CD pipelines",
|
||||
"Cloud platform experience (AWS, Azure, GCP)"
|
||||
];
|
||||
|
||||
// Simulates fetching requirements with a delay
|
||||
const mockFetchRequirements = async (): Promise<string[]> => {
|
||||
return new Promise((resolve) => {
|
||||
setTimeout(() => {
|
||||
resolve(mockRequirements);
|
||||
}, 1500); // Simulate network delay
|
||||
});
|
||||
};
|
||||
|
||||
// Simulates fetching match data for a requirement with varying delays
|
||||
const mockFetchMatchForRequirement = async (requirement: string): Promise<any> => {
|
||||
// Create different mock responses based on the requirement
|
||||
const mockResponses: Record<string, any> = {
|
||||
"5+ years of React development experience": {
|
||||
requirement: "5+ years of React development experience",
|
||||
status: "complete",
|
||||
matchScore: 85,
|
||||
assessment: "The candidate demonstrates extensive React experience spanning over 6 years, with a strong portfolio of complex applications and deep understanding of React's component lifecycle and hooks.",
|
||||
citations: [
|
||||
{
|
||||
text: "Led frontend development team of 5 engineers to rebuild our customer portal using React and TypeScript, resulting in 40% improved performance and 30% reduction in bugs.",
|
||||
source: "Resume, Work Experience",
|
||||
relevance: 95
|
||||
},
|
||||
{
|
||||
text: "Developed and maintained reusable React component library used across 12 different products within the organization.",
|
||||
source: "Resume, Work Experience",
|
||||
relevance: 90
|
||||
},
|
||||
{
|
||||
text: "I've been working with React since 2017, building everything from small widgets to enterprise applications.",
|
||||
source: "Cover Letter",
|
||||
relevance: 85
|
||||
}
|
||||
]
|
||||
},
|
||||
"Strong TypeScript skills": {
|
||||
requirement: "Strong TypeScript skills",
|
||||
status: "complete",
|
||||
matchScore: 90,
|
||||
assessment: "The candidate shows excellent TypeScript proficiency through their work history and personal projects. They have implemented complex type systems and demonstrate an understanding of advanced TypeScript features.",
|
||||
citations: [
|
||||
{
|
||||
text: "Converted a legacy JavaScript codebase of 100,000+ lines to TypeScript, implementing strict type checking and reducing runtime errors by 70%.",
|
||||
source: "Resume, Projects",
|
||||
relevance: 98
|
||||
},
|
||||
{
|
||||
text: "Created comprehensive TypeScript interfaces for our GraphQL API, ensuring type safety across the entire application stack.",
|
||||
source: "Resume, Technical Skills",
|
||||
relevance: 95
|
||||
}
|
||||
]
|
||||
},
|
||||
"Experience with RESTful APIs": {
|
||||
requirement: "Experience with RESTful APIs",
|
||||
status: "complete",
|
||||
matchScore: 75,
|
||||
assessment: "The candidate has good experience with RESTful APIs, having both consumed and designed them. They understand REST principles but have less documented experience with API versioning and caching strategies.",
|
||||
citations: [
|
||||
{
|
||||
text: "Designed and implemented a RESTful API serving over 1M requests daily with a focus on performance and scalability.",
|
||||
source: "Resume, Technical Projects",
|
||||
relevance: 85
|
||||
},
|
||||
{
|
||||
text: "Worked extensively with third-party APIs including Stripe, Twilio, and Salesforce to integrate payment processing and communication features.",
|
||||
source: "Resume, Work Experience",
|
||||
relevance: 70
|
||||
}
|
||||
]
|
||||
},
|
||||
"Knowledge of state management solutions (Redux, Context API)": {
|
||||
requirement: "Knowledge of state management solutions (Redux, Context API)",
|
||||
status: "complete",
|
||||
matchScore: 65,
|
||||
assessment: "The candidate has moderate experience with state management, primarily using Redux. There is less evidence of Context API usage, which could indicate a knowledge gap in more modern React state management approaches.",
|
||||
citations: [
|
||||
{
|
||||
text: "Implemented Redux for global state management in an e-commerce application, handling complex state logic for cart, user preferences, and product filtering.",
|
||||
source: "Resume, Skills",
|
||||
relevance: 80
|
||||
},
|
||||
{
|
||||
text: "My experience includes working with state management libraries like Redux and MobX.",
|
||||
source: "Cover Letter",
|
||||
relevance: 60
|
||||
}
|
||||
]
|
||||
},
|
||||
"Experience with CI/CD pipelines": {
|
||||
requirement: "Experience with CI/CD pipelines",
|
||||
status: "complete",
|
||||
matchScore: 40,
|
||||
assessment: "The candidate shows limited experience with CI/CD pipelines. While they mention some exposure to Jenkins and GitLab CI, there is insufficient evidence of setting up or maintaining comprehensive CI/CD workflows.",
|
||||
citations: [
|
||||
{
|
||||
text: "Familiar with CI/CD tools including Jenkins and GitLab CI.",
|
||||
source: "Resume, Skills",
|
||||
relevance: 40
|
||||
}
|
||||
]
|
||||
},
|
||||
"Cloud platform experience (AWS, Azure, GCP)": {
|
||||
requirement: "Cloud platform experience (AWS, Azure, GCP)",
|
||||
status: "complete",
|
||||
matchScore: 30,
|
||||
assessment: "The candidate demonstrates minimal experience with cloud platforms. There is a brief mention of AWS S3 and Lambda, but no substantial evidence of deeper cloud architecture knowledge or experience with Azure or GCP.",
|
||||
citations: [
|
||||
{
|
||||
text: "Used AWS S3 for file storage and Lambda for image processing in a photo sharing application.",
|
||||
source: "Resume, Projects",
|
||||
relevance: 35
|
||||
}
|
||||
]
|
||||
}
|
||||
};
|
||||
|
||||
// Return a promise that resolves with the mock data after a delay
|
||||
return new Promise((resolve) => {
|
||||
// Different requirements resolve at different speeds to simulate real-world analysis
|
||||
const delay = Math.random() * 5000 + 2000; // 2-7 seconds
|
||||
setTimeout(() => {
|
||||
resolve(mockResponses[requirement]);
|
||||
}, delay);
|
||||
});
|
||||
};
|
||||
|
||||
const DemoComponent: React.FC = () => {
|
||||
return (
|
||||
<JobMatchAnalysis
|
||||
jobTitle="Senior Frontend Developer"
|
||||
candidateName="Alex Johnson"
|
||||
fetchRequirements={mockFetchRequirements}
|
||||
fetchMatchForRequirement={mockFetchMatchForRequirement}
|
||||
/>
|
||||
);
|
||||
};
|
||||
|
||||
export { DemoComponent };
|
@ -187,11 +187,11 @@ const GenerateCandidate = (props: BackstoryElementProps) => {
|
||||
controllerRef.current = apiClient.sendMessageStream(chatMessage, {
|
||||
onMessage: async (msg: ChatMessage) => {
|
||||
console.log(`onMessage: ${msg.type} ${msg.content}`, msg);
|
||||
if (msg.type === "heartbeat") {
|
||||
if (msg.type === "heartbeat" && msg.content) {
|
||||
const heartbeat = JSON.parse(msg.content);
|
||||
setTimestamp(heartbeat.timestamp);
|
||||
}
|
||||
if (msg.type === "thinking") {
|
||||
if (msg.type === "thinking" && msg.content) {
|
||||
const status = JSON.parse(msg.content);
|
||||
setProcessingMessage({ ...defaultMessage, content: status.message });
|
||||
}
|
||||
|
@ -47,7 +47,8 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
|
||||
const theme = useTheme();
|
||||
const { user } = useAuth();
|
||||
const { selectedCandidate, setSelectedCandidate } = useSelectedCandidate()
|
||||
|
||||
const { setSnack, submitQuery } = props;
|
||||
const backstoryProps = { setSnack, submitQuery };
|
||||
// State management
|
||||
const [activeStep, setActiveStep] = useState(0);
|
||||
const [jobDescription, setJobDescription] = useState('');
|
||||
@ -57,7 +58,6 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const [openUploadDialog, setOpenUploadDialog] = useState(false);
|
||||
const { apiClient } = useAuth();
|
||||
const { setSnack } = props;
|
||||
const [candidates, setCandidates] = useState<Candidate[] | null>(null);
|
||||
|
||||
useEffect(() => {
|
||||
@ -421,6 +421,7 @@ const JobAnalysisPage: React.FC<BackstoryPageProps> = (props: BackstoryPageProps
|
||||
<JobMatchAnalysis
|
||||
job={{ title: jobTitle, description: jobDescription }}
|
||||
candidate={selectedCandidate}
|
||||
{...backstoryProps}
|
||||
/>
|
||||
)}
|
||||
</Box>
|
||||
|
@ -824,6 +824,18 @@ class ApiClient {
|
||||
return result;
|
||||
}
|
||||
|
||||
async candidateMatchForRequirement(candidate_id: string, requirement: string) : Promise<Types.SkillMatch> {
|
||||
const response = await fetch(`${this.baseUrl}/candidates/${candidate_id}/skill-match`, {
|
||||
method: 'POST',
|
||||
headers: this.defaultHeaders,
|
||||
body: JSON.stringify(requirement)
|
||||
});
|
||||
|
||||
const result = await handleApiResponse<Types.SkillMatch>(response);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
async updateCandidateDocument(document: Types.Document) : Promise<Types.Document> {
|
||||
const request : Types.DocumentUpdateRequest = {
|
||||
filename: document.filename,
|
||||
@ -1040,7 +1052,11 @@ class ApiClient {
|
||||
|
||||
default:
|
||||
incomingMessageList.push(convertedIncoming);
|
||||
options.onMessage?.(convertedIncoming);
|
||||
try {
|
||||
options.onMessage?.(convertedIncoming);
|
||||
} catch (error) {
|
||||
console.error('onMessage handler failed: ', error);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
// Generated TypeScript types from Pydantic models
|
||||
// Source: src/backend/models.py
|
||||
// Generated on: 2025-06-03T23:59:28.355326
|
||||
// Generated on: 2025-06-04T03:59:11.250216
|
||||
// DO NOT EDIT MANUALLY - This file is auto-generated
|
||||
|
||||
// ============================
|
||||
@ -13,11 +13,11 @@ export type ActivityType = "login" | "search" | "view_job" | "apply_job" | "mess
|
||||
|
||||
export type ApplicationStatus = "applied" | "reviewing" | "interview" | "offer" | "rejected" | "accepted" | "withdrawn";
|
||||
|
||||
export type ChatContextType = "job_search" | "job_requirements" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "generate_image" | "rag_search";
|
||||
export type ChatContextType = "job_search" | "job_requirements" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "generate_image" | "rag_search" | "skill_match";
|
||||
|
||||
export type ChatMessageType = "error" | "generating" | "info" | "preparing" | "processing" | "heartbeat" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";
|
||||
|
||||
export type ChatSenderType = "user" | "assistant" | "system";
|
||||
export type ChatSenderType = "user" | "assistant" | "agent" | "system";
|
||||
|
||||
export type ChatStatusType = "initializing" | "streaming" | "status" | "done" | "error";
|
||||
|
||||
@ -49,6 +49,8 @@ export type SearchType = "similarity" | "mmr" | "hybrid" | "keyword";
|
||||
|
||||
export type SkillLevel = "beginner" | "intermediate" | "advanced" | "expert";
|
||||
|
||||
export type SkillStatus = "pending" | "complete" | "error";
|
||||
|
||||
export type SocialPlatform = "linkedin" | "twitter" | "github" | "dribbble" | "behance" | "website" | "other";
|
||||
|
||||
export type SortOrder = "asc" | "desc";
|
||||
@ -272,7 +274,7 @@ export interface Certification {
|
||||
}
|
||||
|
||||
export interface ChatContext {
|
||||
type: "job_search" | "job_requirements" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "generate_image" | "rag_search";
|
||||
type: "job_search" | "job_requirements" | "candidate_chat" | "interview_prep" | "resume_review" | "general" | "generate_persona" | "generate_profile" | "generate_image" | "rag_search" | "skill_match";
|
||||
relatedEntityId?: string;
|
||||
relatedEntityType?: "job" | "candidate" | "employer";
|
||||
additionalContext?: Record<string, any>;
|
||||
@ -284,7 +286,7 @@ export interface ChatMessage {
|
||||
senderId?: string;
|
||||
status: "initializing" | "streaming" | "status" | "done" | "error";
|
||||
type: "error" | "generating" | "info" | "preparing" | "processing" | "heartbeat" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";
|
||||
sender: "user" | "assistant" | "system";
|
||||
sender: "user" | "assistant" | "agent" | "system";
|
||||
timestamp?: Date;
|
||||
tunables?: Tunables;
|
||||
content: string;
|
||||
@ -297,7 +299,7 @@ export interface ChatMessageBase {
|
||||
senderId?: string;
|
||||
status: "initializing" | "streaming" | "status" | "done" | "error";
|
||||
type: "error" | "generating" | "info" | "preparing" | "processing" | "heartbeat" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";
|
||||
sender: "user" | "assistant" | "system";
|
||||
sender: "user" | "assistant" | "agent" | "system";
|
||||
timestamp?: Date;
|
||||
tunables?: Tunables;
|
||||
content: string;
|
||||
@ -328,7 +330,7 @@ export interface ChatMessageRagSearch {
|
||||
senderId?: string;
|
||||
status: "initializing" | "streaming" | "status" | "done" | "error";
|
||||
type: "error" | "generating" | "info" | "preparing" | "processing" | "heartbeat" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";
|
||||
sender: "user" | "assistant" | "system";
|
||||
sender: "user" | "assistant" | "agent" | "system";
|
||||
timestamp?: Date;
|
||||
tunables?: Tunables;
|
||||
content: string;
|
||||
@ -341,7 +343,7 @@ export interface ChatMessageUser {
|
||||
senderId?: string;
|
||||
status: "initializing" | "streaming" | "status" | "done" | "error";
|
||||
type: "error" | "generating" | "info" | "preparing" | "processing" | "heartbeat" | "response" | "searching" | "rag_result" | "system" | "thinking" | "tooling" | "user";
|
||||
sender: "user" | "assistant" | "system";
|
||||
sender: "user" | "assistant" | "agent" | "system";
|
||||
timestamp?: Date;
|
||||
tunables?: Tunables;
|
||||
content: string;
|
||||
@ -386,6 +388,12 @@ export interface ChromaDBGetResponse {
|
||||
umapEmbedding3D?: Array<number>;
|
||||
}
|
||||
|
||||
export interface Citation {
|
||||
text: string;
|
||||
source: string;
|
||||
relevance: number;
|
||||
}
|
||||
|
||||
export interface CreateCandidateRequest {
|
||||
email: string;
|
||||
username: string;
|
||||
@ -613,6 +621,16 @@ export interface JobListResponse {
|
||||
meta?: Record<string, any>;
|
||||
}
|
||||
|
||||
export interface JobRequirements {
|
||||
technicalSkills: Requirements;
|
||||
experienceRequirements: Requirements;
|
||||
softSkills?: Array<string>;
|
||||
experience?: Array<string>;
|
||||
education?: Array<string>;
|
||||
certifications?: Array<string>;
|
||||
preferredAttributes?: Array<string>;
|
||||
}
|
||||
|
||||
export interface JobResponse {
|
||||
success: boolean;
|
||||
data?: Job;
|
||||
@ -765,6 +783,11 @@ export interface RefreshToken {
|
||||
revokedReason?: string;
|
||||
}
|
||||
|
||||
export interface Requirements {
|
||||
required?: Array<string>;
|
||||
preferred?: Array<string>;
|
||||
}
|
||||
|
||||
export interface ResendVerificationRequest {
|
||||
email: string;
|
||||
}
|
||||
@ -810,6 +833,14 @@ export interface SkillAssessment {
|
||||
comments?: string;
|
||||
}
|
||||
|
||||
export interface SkillMatch {
|
||||
requirement: string;
|
||||
status: "pending" | "complete" | "error";
|
||||
matchScore: number;
|
||||
assessment: string;
|
||||
citations?: Array<Citation>;
|
||||
}
|
||||
|
||||
export interface SocialLink {
|
||||
platform: "linkedin" | "twitter" | "github" | "dribbble" | "behance" | "website" | "other";
|
||||
url: string;
|
||||
@ -857,233 +888,6 @@ export interface WorkExperience {
|
||||
achievements?: Array<string>;
|
||||
}
|
||||
|
||||
// ============================
|
||||
// Default Objects
|
||||
// ============================
|
||||
|
||||
// These objects contain the default values from your Pydantic models
|
||||
// Use them to initialize objects with sensible defaults:
|
||||
// const message: ChatMessage = { ...DefaultChatMessage, sessionId: '123', content: 'Hello' };
|
||||
|
||||
/**
|
||||
* Default values for BaseUser
|
||||
* Fields with defaults: isAdmin
|
||||
*/
|
||||
export const DefaultBaseUser: Partial<BaseUser> = {
|
||||
isAdmin: False
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for BaseUserWithType
|
||||
* Fields with defaults: isAdmin
|
||||
*/
|
||||
export const DefaultBaseUserWithType: Partial<BaseUserWithType> = {
|
||||
isAdmin: False
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for Candidate
|
||||
* Fields with defaults: isAdmin, userType, ragContentSize
|
||||
*/
|
||||
export const DefaultCandidate: Partial<Candidate> = {
|
||||
isAdmin: False,
|
||||
userType: "candidate",
|
||||
ragContentSize: 0
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for CandidateAI
|
||||
* Fields with defaults: isAdmin, userType, ragContentSize, isAI
|
||||
*/
|
||||
export const DefaultCandidateAI: Partial<CandidateAI> = {
|
||||
isAdmin: False,
|
||||
userType: "candidate",
|
||||
ragContentSize: 0,
|
||||
isAI: True
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for ChatContext
|
||||
* Fields with defaults: additionalContext
|
||||
*/
|
||||
export const DefaultChatContext: Partial<ChatContext> = {
|
||||
additionalContext: {}
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for ChatMessage
|
||||
* Fields with defaults: status, type, sender, content
|
||||
*/
|
||||
export const DefaultChatMessage: Partial<ChatMessage> = {
|
||||
status: "initializing",
|
||||
type: "preparing",
|
||||
sender: "system",
|
||||
content: ""
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for ChatMessageBase
|
||||
* Fields with defaults: status, type, sender, content
|
||||
*/
|
||||
export const DefaultChatMessageBase: Partial<ChatMessageBase> = {
|
||||
status: "initializing",
|
||||
type: "preparing",
|
||||
sender: "system",
|
||||
content: ""
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for ChatMessageMetaData
|
||||
* Fields with defaults: model, temperature, maxTokens, topP, evalCount, evalDuration, promptEvalCount, promptEvalDuration
|
||||
*/
|
||||
export const DefaultChatMessageMetaData: Partial<ChatMessageMetaData> = {
|
||||
model: "qwen2.5",
|
||||
temperature: 0.7,
|
||||
maxTokens: 8092,
|
||||
topP: 1,
|
||||
evalCount: 0,
|
||||
evalDuration: 0,
|
||||
promptEvalCount: 0,
|
||||
promptEvalDuration: 0
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for ChatMessageRagSearch
|
||||
* Fields with defaults: status, type, sender, content, dimensions
|
||||
*/
|
||||
export const DefaultChatMessageRagSearch: Partial<ChatMessageRagSearch> = {
|
||||
status: "done",
|
||||
type: "rag_result",
|
||||
sender: "user",
|
||||
content: "",
|
||||
dimensions: 3
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for ChatMessageUser
|
||||
* Fields with defaults: status, type, sender, content
|
||||
*/
|
||||
export const DefaultChatMessageUser: Partial<ChatMessageUser> = {
|
||||
status: "done",
|
||||
type: "user",
|
||||
sender: "user",
|
||||
content: ""
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for ChatOptions
|
||||
* Fields with defaults: seed, temperature
|
||||
*/
|
||||
export const DefaultChatOptions: Partial<ChatOptions> = {
|
||||
seed: 8911,
|
||||
temperature: 0.7
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for ChatSession
|
||||
* Fields with defaults: isArchived
|
||||
*/
|
||||
export const DefaultChatSession: Partial<ChatSession> = {
|
||||
isArchived: False
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for ChromaDBGetResponse
|
||||
* Fields with defaults: ids, embeddings, documents, metadatas, distances, name, size, dimensions, query
|
||||
*/
|
||||
export const DefaultChromaDBGetResponse: Partial<ChromaDBGetResponse> = {
|
||||
ids: [],
|
||||
embeddings: [],
|
||||
documents: [],
|
||||
metadatas: [],
|
||||
distances: [],
|
||||
name: "",
|
||||
size: 0,
|
||||
dimensions: 3,
|
||||
query: ""
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for Document
|
||||
* Fields with defaults: includeInRAG, ragChunks
|
||||
*/
|
||||
export const DefaultDocument: Partial<Document> = {
|
||||
includeInRAG: True,
|
||||
ragChunks: 0
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for Employer
|
||||
* Fields with defaults: isAdmin, userType
|
||||
*/
|
||||
export const DefaultEmployer: Partial<Employer> = {
|
||||
isAdmin: False,
|
||||
userType: "employer"
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for Job
|
||||
* Fields with defaults: views, applicationCount
|
||||
*/
|
||||
export const DefaultJob: Partial<Job> = {
|
||||
views: 0,
|
||||
applicationCount: 0
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for LLMMessage
|
||||
* Fields with defaults: role, content, toolCalls
|
||||
*/
|
||||
export const DefaultLLMMessage: Partial<LLMMessage> = {
|
||||
role: "",
|
||||
content: "",
|
||||
toolCalls: {}
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for MFAVerifyRequest
|
||||
* Fields with defaults: rememberDevice
|
||||
*/
|
||||
export const DefaultMFAVerifyRequest: Partial<MFAVerifyRequest> = {
|
||||
rememberDevice: False
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for PaginatedRequest
|
||||
* Fields with defaults: page, limit
|
||||
*/
|
||||
export const DefaultPaginatedRequest: Partial<PaginatedRequest> = {
|
||||
page: 1,
|
||||
limit: 20
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for RagEntry
|
||||
* Fields with defaults: description, enabled
|
||||
*/
|
||||
export const DefaultRagEntry: Partial<RagEntry> = {
|
||||
description: "",
|
||||
enabled: True
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for SearchQuery
|
||||
* Fields with defaults: page, limit
|
||||
*/
|
||||
export const DefaultSearchQuery: Partial<SearchQuery> = {
|
||||
page: 1,
|
||||
limit: 20
|
||||
};
|
||||
|
||||
/**
|
||||
* Default values for Tunables
|
||||
* Fields with defaults: enableRAG, enableTools, enableContext
|
||||
*/
|
||||
export const DefaultTunables: Partial<Tunables> = {
|
||||
enableRAG: True,
|
||||
enableTools: True,
|
||||
enableContext: True
|
||||
};
|
||||
// ============================
|
||||
// Date Conversion Functions
|
||||
// ============================
|
||||
|
@ -17,6 +17,7 @@ from typing import (
|
||||
import json
|
||||
import time
|
||||
import inspect
|
||||
import re
|
||||
from abc import ABC
|
||||
import asyncio
|
||||
from datetime import datetime, UTC
|
||||
@ -79,6 +80,12 @@ class Agent(BaseModel, ABC):
|
||||
def context_size(self, value: int):
|
||||
Agent._context_size = value
|
||||
|
||||
async def get_last_item(self, generator):
    """Drain an async generator and return the final item it yielded.

    Returns None when the generator yields nothing.
    """
    result = None
    async for element in generator:
        result = element
    return result
|
||||
|
||||
def set_optimal_context_size(
|
||||
self, llm: Any, model: str, prompt: str, ctx_buffer=2048
|
||||
) -> int:
|
||||
@ -297,7 +304,7 @@ class Agent(BaseModel, ABC):
|
||||
self,
|
||||
chat_message: ChatMessage,
|
||||
top_k: int=defines.default_rag_top_k,
|
||||
threshold: float=defines.default_rag_threshold
|
||||
threshold: float=defines.default_rag_threshold,
|
||||
) -> AsyncGenerator[ChatMessage, None]:
|
||||
"""
|
||||
Generate RAG results for the given query.
|
||||
@ -320,24 +327,29 @@ class Agent(BaseModel, ABC):
|
||||
)
|
||||
|
||||
if not self.user:
|
||||
logger.error("No user set for RAG generation")
|
||||
rag_message.status = ChatStatusType.DONE
|
||||
rag_message.content = "No user connected to this chat, so no RAG content."
|
||||
rag_message.content = ""
|
||||
yield rag_message
|
||||
return
|
||||
|
||||
try:
|
||||
entries: int = 0
|
||||
user: Candidate = self.user
|
||||
rag_content: str = ""
|
||||
for rag in user.rags:
|
||||
if not rag.enabled:
|
||||
continue
|
||||
rag_message.type = ChatMessageType.SEARCHING
|
||||
rag_message.status = ChatStatusType.INITIALIZING
|
||||
rag_message.content = f"Checking RAG context {rag.name}..."
|
||||
yield rag_message
|
||||
status_message = ChatMessage(
|
||||
session_id=chat_message.session_id,
|
||||
sender=ChatSenderType.AGENT,
|
||||
status = ChatStatusType.INITIALIZING,
|
||||
type = ChatMessageType.SEARCHING,
|
||||
content = f"Checking RAG context {rag.name}...")
|
||||
yield status_message
|
||||
|
||||
chroma_results = user.file_watcher.find_similar(
|
||||
query=rag_message.content, top_k=top_k, threshold=threshold
|
||||
query=chat_message.content, top_k=top_k, threshold=threshold
|
||||
)
|
||||
if chroma_results:
|
||||
query_embedding = np.array(chroma_results["query_embedding"]).flatten()
|
||||
@ -360,15 +372,26 @@ class Agent(BaseModel, ABC):
|
||||
|
||||
entries += len(rag_metadata.documents)
|
||||
rag_message.metadata.rag_results.append(rag_metadata)
|
||||
rag_message.content = f"Results from {rag.name} RAG: {len(rag_metadata.documents)} results."
|
||||
yield rag_message
|
||||
|
||||
rag_message.content = (
|
||||
f"RAG context gathered from results from {entries} documents."
|
||||
)
|
||||
for index, metadata in enumerate(chroma_results["metadatas"]):
|
||||
content = "\n".join(
|
||||
[
|
||||
line.strip()
|
||||
for line in chroma_results["documents"][index].split("\n")
|
||||
if line
|
||||
]
|
||||
).strip()
|
||||
rag_content += f"""
|
||||
Source: {metadata.get("doc_type", "unknown")}: {metadata.get("path", "")}
|
||||
Document reference: {chroma_results["ids"][index]}
|
||||
Content: { content }
|
||||
"""
|
||||
rag_message.content = rag_content.strip()
|
||||
rag_message.type = ChatMessageType.RAG_RESULT
|
||||
rag_message.status = ChatStatusType.DONE
|
||||
yield rag_message
|
||||
return
|
||||
|
||||
except Exception as e:
|
||||
rag_message.status = ChatStatusType.ERROR
|
||||
rag_message.content = f"Error generating RAG results: {str(e)}"
|
||||
@ -377,6 +400,80 @@ class Agent(BaseModel, ABC):
|
||||
yield rag_message
|
||||
return
|
||||
|
||||
async def llm_one_shot(self, llm: Any, model: str, user_message: ChatMessageUser, system_prompt: str, temperature=0.7):
    """Run a single system+user prompt against the LLM and stream the reply.

    Yields ChatMessage updates while streaming and a final message with
    type=RESPONSE / status=DONE once the model reports completion. On a
    falsy stream chunk, yields an ERROR message and stops.

    Args:
        llm: Client exposing a streaming `chat(...)` API (shape per usage below).
        model: Model identifier passed through to the client.
        user_message: Source of session id, tunables, and the user prompt text.
        system_prompt: System prompt placed first in the message list.
        temperature: Sampling temperature (default 0.7).
    """
    # Seed an agent-side message that carries session/tunables state and
    # accumulates the streamed content.
    chat_message = ChatMessage(
        session_id=user_message.session_id,
        tunables=user_message.tunables,
        status=ChatStatusType.INITIALIZING,
        type=ChatMessageType.PREPARING,
        sender=ChatSenderType.AGENT,
        content="",
        timestamp=datetime.now(UTC)
    )

    # NOTE(review): called with the (still empty) chat_message.content rather
    # than the actual prompt text — confirm whether user_message.content or
    # system_prompt was intended for sizing the context window.
    self.set_optimal_context_size(
        llm, model, prompt=chat_message.content
    )

    chat_message.metadata = ChatMessageMetaData()
    chat_message.metadata.options = ChatOptions(
        seed=8911,
        num_ctx=self.context_size,
        temperature=temperature, # Higher temperature to encourage tool usage
    )

    messages: List[LLMMessage] = [
        LLMMessage(role="system", content=system_prompt),
        LLMMessage(role="user", content=user_message.content),
    ]

    # Reset the response for streaming
    chat_message.content = ""
    chat_message.type = ChatMessageType.GENERATING
    chat_message.status = ChatStatusType.STREAMING

    logger.info(f"Message options: {chat_message.metadata.options.model_dump(exclude_unset=True)}")
    response = None
    for response in llm.chat(
        model=model,
        messages=messages,
        options={
            **chat_message.metadata.options.model_dump(exclude_unset=True),
        },
        stream=True,
    ):
        # A falsy chunk mid-stream is treated as a hard failure.
        if not response:
            chat_message.status = ChatStatusType.ERROR
            chat_message.content = "No response from LLM."
            yield chat_message
            return

        chat_message.content += response.message.content

        if not response.done:
            # NOTE(review): chat_chunk is built with only the delta content but
            # chat_message (full accumulated content) is what gets yielded —
            # likely intended to `yield chat_chunk`; confirm.
            chat_chunk = model_cast.cast_to_model(ChatMessageBase, chat_message)
            chat_chunk.content = response.message.content
            yield chat_message
            continue

    # NOTE(review): this guard only fires when llm.chat() yielded nothing at
    # all (response still None); the in-loop guard already covers falsy chunks.
    if not response:
        chat_message.status = ChatStatusType.ERROR
        chat_message.content = "No response from LLM."
        yield chat_message
        return

    # Final chunk: fold token/timing metrics into the message metadata and
    # record total context usage on the agent.
    self.collect_metrics(response)
    chat_message.metadata.eval_count += response.eval_count
    chat_message.metadata.eval_duration += response.eval_duration
    chat_message.metadata.prompt_eval_count += response.prompt_eval_count
    chat_message.metadata.prompt_eval_duration += response.prompt_eval_duration
    self.context_tokens = (
        response.prompt_eval_count + response.eval_count
    )
    chat_message.type = ChatMessageType.RESPONSE
    chat_message.status = ChatStatusType.DONE
    yield chat_message
|
||||
|
||||
async def generate(
|
||||
self, llm: Any, model: str, user_message: ChatMessageUser, user: Candidate | None, temperature=0.7
|
||||
) -> AsyncGenerator[ChatMessage | ChatMessageBase, None]:
|
||||
@ -392,6 +489,10 @@ class Agent(BaseModel, ABC):
|
||||
timestamp=datetime.now(UTC)
|
||||
)
|
||||
|
||||
self.set_optimal_context_size(
|
||||
llm, model, prompt=chat_message.content
|
||||
)
|
||||
|
||||
chat_message.metadata = ChatMessageMetaData()
|
||||
chat_message.metadata.options = ChatOptions(
|
||||
seed=8911,
|
||||
@ -679,6 +780,20 @@ Content: { content }
|
||||
|
||||
# return
|
||||
|
||||
def extract_json_from_text(self, text: str) -> str:
    """Extract a JSON string from text that may contain other content.

    Tries a fenced ```json ... ``` block first, then falls back to the
    widest brace-delimited span.

    Raises:
        ValueError: when neither pattern matches.
    """
    # Ordered from most to least specific: fenced markdown block, bare object.
    for pattern in (r"```json\s*([\s\S]*?)\s*```", r"({[\s\S]*})"):
        found = re.search(pattern, text)
        if found:
            return found.group(1).strip()

    raise ValueError("No JSON found in the response")
|
||||
|
||||
# Register the base agent
|
||||
agent_registry.register(Agent._agent_type, Agent)
|
||||
|
@ -315,69 +315,6 @@ class GeneratePersona(Agent):
|
||||
self.first_name, self.last_name, self.ethnicity, self.gender = self.generator.generate_random_name()
|
||||
self.full_name = f"{self.first_name} {self.last_name}"
|
||||
|
||||
async def call_llm(self, llm: Any, model: str, user_message: ChatMessageUser, system_prompt: str, temperature=0.7):
|
||||
chat_message = ChatMessage(
|
||||
session_id=user_message.session_id,
|
||||
tunables=user_message.tunables,
|
||||
status=ChatStatusType.INITIALIZING,
|
||||
type=ChatMessageType.PREPARING,
|
||||
sender=ChatSenderType.ASSISTANT,
|
||||
content="",
|
||||
timestamp=datetime.now(UTC)
|
||||
)
|
||||
|
||||
chat_message.metadata = ChatMessageMetaData()
|
||||
chat_message.metadata.options = ChatOptions(
|
||||
seed=8911,
|
||||
num_ctx=self.context_size,
|
||||
temperature=temperature, # Higher temperature to encourage tool usage
|
||||
)
|
||||
|
||||
messages: List[LLMMessage] = [
|
||||
LLMMessage(role="system", content=system_prompt),
|
||||
LLMMessage(role="user", content=user_message.content),
|
||||
]
|
||||
|
||||
# Reset the response for streaming
|
||||
chat_message.content = ""
|
||||
chat_message.type = ChatMessageType.GENERATING
|
||||
chat_message.status = ChatStatusType.STREAMING
|
||||
|
||||
for response in llm.chat(
|
||||
model=model,
|
||||
messages=messages,
|
||||
options={
|
||||
**chat_message.metadata.options.model_dump(exclude_unset=True),
|
||||
},
|
||||
stream=True,
|
||||
):
|
||||
if not response:
|
||||
chat_message.status = ChatStatusType.ERROR
|
||||
chat_message.content = "No response from LLM."
|
||||
yield chat_message
|
||||
return
|
||||
|
||||
chat_message.content += response.message.content
|
||||
|
||||
if not response.done:
|
||||
chat_chunk = model_cast.cast_to_model(ChatMessageBase, chat_message)
|
||||
chat_chunk.content = response.message.content
|
||||
yield chat_message
|
||||
continue
|
||||
|
||||
if response.done:
|
||||
self.collect_metrics(response)
|
||||
chat_message.metadata.eval_count += response.eval_count
|
||||
chat_message.metadata.eval_duration += response.eval_duration
|
||||
chat_message.metadata.prompt_eval_count += response.prompt_eval_count
|
||||
chat_message.metadata.prompt_eval_duration += response.prompt_eval_duration
|
||||
self.context_tokens = (
|
||||
response.prompt_eval_count + response.eval_count
|
||||
)
|
||||
chat_message.type = ChatMessageType.RESPONSE
|
||||
chat_message.status = ChatStatusType.DONE
|
||||
yield chat_message
|
||||
|
||||
async def generate(
|
||||
self, llm: Any, model: str, user_message: ChatMessageUser, user: Candidate, temperature=0.7
|
||||
):
|
||||
@ -409,7 +346,7 @@ Incorporate the following into the job description: {original_prompt}
|
||||
#
|
||||
logger.info(f"🤖 Generating persona for {self.full_name}")
|
||||
generating_message = None
|
||||
async for generating_message in self.call_llm(
|
||||
async for generating_message in self.llm_one_shot(
|
||||
llm=llm, model=model,
|
||||
user_message=user_message,
|
||||
system_prompt=generate_persona_system_prompt,
|
||||
@ -515,7 +452,7 @@ Incorporate the following into the job description: {original_prompt}
|
||||
user_message.content += f"""
|
||||
Make sure at least one of the candidate's job descriptions take into account the following: {original_prompt}."""
|
||||
|
||||
async for generating_message in self.call_llm(
|
||||
async for generating_message in self.llm_one_shot(
|
||||
llm=llm, model=model,
|
||||
user_message=user_message,
|
||||
system_prompt=generate_resume_system_prompt,
|
||||
|
@ -138,14 +138,10 @@ def is_date_type(python_type: Any) -> bool:
|
||||
|
||||
return False
|
||||
|
||||
def get_field_default_value(field_info: Any, debug: bool = False) -> tuple[bool, Any]:
|
||||
"""Extract the default value from a field, if it exists
|
||||
|
||||
Returns:
|
||||
tuple: (has_default, default_value)
|
||||
"""
|
||||
def get_default_enum_value(field_info: Any, debug: bool = False) -> Optional[Any]:
|
||||
"""Extract the specific enum value from a field's default, if it exists"""
|
||||
if not hasattr(field_info, 'default'):
|
||||
return False, None
|
||||
return None
|
||||
|
||||
default_val = field_info.default
|
||||
|
||||
@ -156,7 +152,7 @@ def get_field_default_value(field_info: Any, debug: bool = False) -> tuple[bool,
|
||||
if default_val is ... or default_val is None:
|
||||
if debug:
|
||||
print(f" └─ Default is undefined marker")
|
||||
return False, None
|
||||
return None
|
||||
|
||||
# Check for Pydantic's internal "PydanticUndefined" or similar markers
|
||||
default_str = str(default_val)
|
||||
@ -177,72 +173,17 @@ def get_field_default_value(field_info: Any, debug: bool = False) -> tuple[bool,
|
||||
if is_undefined_marker:
|
||||
if debug:
|
||||
print(f" └─ Default is undefined marker pattern")
|
||||
return False, None
|
||||
return None
|
||||
|
||||
# We have a real default value
|
||||
if debug:
|
||||
print(f" └─ Has real default value: {repr(default_val)}")
|
||||
return True, default_val
|
||||
|
||||
def convert_default_to_typescript(default_val: Any, debug: bool = False) -> str:
    """Convert a Python default value to an equivalent TypeScript literal.

    Args:
        default_val: Default value extracted from a Pydantic field.
        debug: When True, print tracing information about the conversion.

    Returns:
        TypeScript source text for the literal, e.g. 'false', '"text"',
        '[]', '{"k": 1}', 'new Date("...")'. None maps to 'undefined';
        unknown types fall back to a quoted string or 'undefined'.
    """
    if debug:
        print(f"  🔄 Converting default: {repr(default_val)} (type: {type(default_val)})")

    # Handle None
    if default_val is None:
        return "undefined"

    # Handle Enum instances: emit the enum's value as a string literal
    if isinstance(default_val, Enum):
        return f'"{default_val.value}"'

    # FIX: bool must be tested BEFORE (int, float). bool is a subclass of int
    # in Python, so the old ordering routed True/False through the numeric
    # branch and emitted "True"/"False" — invalid TypeScript (this is exactly
    # the `isAdmin: False` / `enabled: True` seen in the generated output).
    if isinstance(default_val, bool):
        return "true" if default_val else "false"
    elif isinstance(default_val, str):
        # Escape backslashes and quotes for a double-quoted TS string
        escaped = default_val.replace('\\', '\\\\').replace('"', '\\"')
        return f'"{escaped}"'
    elif isinstance(default_val, (int, float)):
        return str(default_val)
    elif isinstance(default_val, list):
        if not default_val:  # Empty list
            return "[]"
        # For non-empty lists, convert each item recursively
        items = [convert_default_to_typescript(item, debug) for item in default_val]
        return f"[{', '.join(items)}]"
    elif isinstance(default_val, dict):
        if not default_val:  # Empty dict
            return "{}"
        # For non-empty dicts, convert each key-value pair recursively
        items = []
        for key, value in default_val.items():
            key_str = f'"{key}"' if isinstance(key, str) else str(key)
            value_str = convert_default_to_typescript(value, debug)
            items.append(f"{key_str}: {value_str}")
        return f"{{{', '.join(items)}}}"
    elif isinstance(default_val, datetime):
        # Convert datetime to ISO string, then wrap in new Date()
        iso_string = default_val.isoformat()
        return f'new Date("{iso_string}")'

    # For other types, try to convert to a reasonable TS representation
    if debug:
        print(f"  ⚠️ Unknown default type, converting to string: {type(default_val)}")

    try:
        if hasattr(default_val, '__dict__'):
            # Complex object: fall back to an empty object literal
            return "{}"
        else:
            str_val = str(default_val)
            escaped = str_val.replace('\\', '\\\\').replace('"', '\\"')
            return f'"{escaped}"'
    except Exception:  # FIX: narrowed from bare `except:` (no longer traps SystemExit/KeyboardInterrupt)
        return "undefined"
|
||||
print(f" └─ Default is not an enum instance")
|
||||
return None
|
||||
|
||||
def python_type_to_typescript(python_type: Any, field_info: Any = None, debug: bool = False) -> str:
|
||||
"""Convert a Python type to TypeScript type string, considering field defaults"""
|
||||
@ -257,9 +198,14 @@ def python_type_to_typescript(python_type: Any, field_info: Any = None, debug: b
|
||||
if debug and original_type != python_type:
|
||||
print(f" 🔄 Unwrapped: {original_type} -> {python_type}")
|
||||
|
||||
# REMOVED: The problematic enum default checking that returns only the default value
|
||||
# This was causing the issue where enum fields would only show the default value
|
||||
# instead of all possible enum values
|
||||
# FIXED: Don't lock enum types to their default values
|
||||
# Instead, always return the full enum type
|
||||
if field_info:
|
||||
default_enum = get_default_enum_value(field_info, debug)
|
||||
if default_enum is not None:
|
||||
if debug:
|
||||
print(f" 🎯 Field has specific enum default: {default_enum.value}, but returning full enum type")
|
||||
# Don't return just the default value - continue to process the full enum type
|
||||
|
||||
# Handle None/null
|
||||
if python_type is type(None):
|
||||
@ -323,12 +269,9 @@ def python_type_to_typescript(python_type: Any, field_info: Any = None, debug: b
|
||||
literal_values.append(str(arg))
|
||||
return " | ".join(literal_values)
|
||||
|
||||
# Handle Enum types - THIS IS THE CORRECT BEHAVIOR
|
||||
# Return all possible enum values, not just the default
|
||||
# Handle Enum types
|
||||
if isinstance(python_type, type) and issubclass(python_type, Enum):
|
||||
enum_values = [f'"{v.value}"' for v in python_type]
|
||||
if debug:
|
||||
print(f" 🎯 Enum type detected: {python_type.__name__} with values: {enum_values}")
|
||||
return " | ".join(enum_values)
|
||||
|
||||
# Handle individual enum instances
|
||||
@ -433,12 +376,18 @@ def is_field_optional(field_info: Any, field_type: Any, debug: bool = False) ->
|
||||
print(f" └─ RESULT: Required (default is undefined marker)")
|
||||
return False
|
||||
|
||||
# FIXED: Fields with actual default values (including enums) should be REQUIRED
|
||||
# Special case: if field has a specific default value (like enum), it's required
|
||||
# because it will always have a value, just not optional for the consumer
|
||||
if isinstance(default_val, Enum):
|
||||
if debug:
|
||||
print(f" └─ RESULT: Required (has specific enum default: {default_val.value})")
|
||||
return False
|
||||
|
||||
# FIXED: Fields with actual default values (like [], "", 0) should be REQUIRED
|
||||
# because they will always have a value (either provided or the default)
|
||||
# This applies to enum fields with defaults as well
|
||||
if debug:
|
||||
print(f" └─ RESULT: Required (has actual default value - field will always have a value)")
|
||||
return False
|
||||
return False # Changed from True to False
|
||||
else:
|
||||
if debug:
|
||||
print(f" └─ No default attribute found")
|
||||
@ -472,7 +421,6 @@ def process_pydantic_model(model_class, debug: bool = False) -> Dict[str, Any]:
|
||||
interface_name = model_class.__name__
|
||||
properties = []
|
||||
date_fields = [] # Track date fields for conversion functions
|
||||
default_fields = [] # Track fields with default values for default object generation
|
||||
|
||||
if debug:
|
||||
print(f" 🔍 Processing model: {interface_name}")
|
||||
@ -498,17 +446,6 @@ def process_pydantic_model(model_class, debug: bool = False) -> Dict[str, Any]:
|
||||
if debug:
|
||||
print(f" Raw type: {field_type}")
|
||||
|
||||
# Check for default values
|
||||
has_default, default_value = get_field_default_value(field_info, debug)
|
||||
if has_default:
|
||||
ts_default = convert_default_to_typescript(default_value, debug)
|
||||
default_fields.append({
|
||||
'name': ts_name,
|
||||
'value': ts_default
|
||||
})
|
||||
if debug:
|
||||
print(f" 🎯 Default value: {repr(default_value)} -> {ts_default}")
|
||||
|
||||
# Check if this is a date field
|
||||
is_date = is_date_type(field_type)
|
||||
if debug:
|
||||
@ -525,7 +462,7 @@ def process_pydantic_model(model_class, debug: bool = False) -> Dict[str, Any]:
|
||||
elif debug and ('date' in str(field_type).lower() or 'time' in str(field_type).lower()):
|
||||
print(f" ⚠️ Field {ts_name} contains 'date'/'time' but not detected as date type: {field_type}")
|
||||
|
||||
# Pass field_info to the type converter (but now it won't override enum types)
|
||||
# Pass field_info to the type converter for default enum handling
|
||||
ts_type = python_type_to_typescript(field_type, field_info, debug)
|
||||
|
||||
# Check if optional
|
||||
@ -561,17 +498,6 @@ def process_pydantic_model(model_class, debug: bool = False) -> Dict[str, Any]:
|
||||
if debug:
|
||||
print(f" Raw type: {field_type}")
|
||||
|
||||
# Check for default values
|
||||
has_default, default_value = get_field_default_value(field_info, debug)
|
||||
if has_default:
|
||||
ts_default = convert_default_to_typescript(default_value, debug)
|
||||
default_fields.append({
|
||||
'name': ts_name,
|
||||
'value': ts_default
|
||||
})
|
||||
if debug:
|
||||
print(f" 🎯 Default value: {repr(default_value)} -> {ts_default}")
|
||||
|
||||
# Check if this is a date field
|
||||
is_date = is_date_type(field_type)
|
||||
if debug:
|
||||
@ -588,7 +514,7 @@ def process_pydantic_model(model_class, debug: bool = False) -> Dict[str, Any]:
|
||||
elif debug and ('date' in str(field_type).lower() or 'time' in str(field_type).lower()):
|
||||
print(f" ⚠️ Field {ts_name} contains 'date'/'time' but not detected as date type: {field_type}")
|
||||
|
||||
# Pass field_info to the type converter (but now it won't override enum types)
|
||||
# Pass field_info to the type converter for default enum handling
|
||||
ts_type = python_type_to_typescript(field_type, field_info, debug)
|
||||
|
||||
# For Pydantic v1, check required and default
|
||||
@ -609,8 +535,7 @@ def process_pydantic_model(model_class, debug: bool = False) -> Dict[str, Any]:
|
||||
return {
|
||||
'name': interface_name,
|
||||
'properties': properties,
|
||||
'date_fields': date_fields,
|
||||
'default_fields': default_fields
|
||||
'date_fields': date_fields
|
||||
}
|
||||
|
||||
def process_enum(enum_class) -> Dict[str, Any]:
|
||||
@ -624,159 +549,6 @@ def process_enum(enum_class) -> Dict[str, Any]:
|
||||
'values': " | ".join(values)
|
||||
}
|
||||
|
||||
def generate_default_objects(interfaces: List[Dict[str, Any]]) -> str:
|
||||
"""Generate TypeScript default objects for models with default values"""
|
||||
default_objects = []
|
||||
|
||||
for interface in interfaces:
|
||||
interface_name = interface['name']
|
||||
default_fields = interface.get('default_fields', [])
|
||||
|
||||
if not default_fields:
|
||||
continue # Skip interfaces without default values
|
||||
|
||||
object_name = f"Default{interface_name}"
|
||||
|
||||
# Generate default object
|
||||
obj_lines = [
|
||||
f"/**",
|
||||
f" * Default values for {interface_name}",
|
||||
f" * Fields with defaults: {', '.join([f['name'] for f in default_fields])}",
|
||||
f" */",
|
||||
f"export const {object_name}: Partial<{interface_name}> = {{"
|
||||
]
|
||||
|
||||
# Add default field values
|
||||
for i, default_field in enumerate(default_fields):
|
||||
field_name = default_field['name']
|
||||
field_value = default_field['value']
|
||||
|
||||
# Add comma for all but the last field
|
||||
comma = "," if i < len(default_fields) - 1 else ""
|
||||
obj_lines.append(f" {field_name}: {field_value}{comma}")
|
||||
|
||||
obj_lines.append("};")
|
||||
obj_lines.append("") # Empty line after each object
|
||||
|
||||
default_objects.append('\n'.join(obj_lines))
|
||||
|
||||
if not default_objects:
|
||||
return ""
|
||||
|
||||
# Generate the default objects section
|
||||
result = [
|
||||
"// ============================",
|
||||
"// Default Objects",
|
||||
"// ============================",
|
||||
"",
|
||||
"// These objects contain the default values from your Pydantic models",
|
||||
"// Use them to initialize objects with sensible defaults:",
|
||||
"// const message: ChatMessage = { ...DefaultChatMessage, sessionId: '123', content: 'Hello' };",
|
||||
"",
|
||||
]
|
||||
|
||||
result.extend(default_objects)
|
||||
|
||||
return '\n'.join(result)
|
||||
"""Generate TypeScript conversion functions for models with date fields"""
|
||||
conversion_functions = []
|
||||
|
||||
for interface in interfaces:
|
||||
interface_name = interface['name']
|
||||
date_fields = interface.get('date_fields', [])
|
||||
|
||||
if not date_fields:
|
||||
continue # Skip interfaces without date fields
|
||||
|
||||
function_name = f"convert{interface_name}FromApi"
|
||||
|
||||
# Generate function
|
||||
func_lines = [
|
||||
f"/**",
|
||||
f" * Convert {interface_name} from API response, parsing date fields",
|
||||
f" * Date fields: {', '.join([f['name'] for f in date_fields])}",
|
||||
f" */",
|
||||
f"export function {function_name}(data: any): {interface_name} {{",
|
||||
f" if (!data) return data;",
|
||||
f" ",
|
||||
f" return {{",
|
||||
f" ...data,"
|
||||
]
|
||||
|
||||
# Add date field conversions with validation
|
||||
for date_field in date_fields:
|
||||
field_name = date_field['name']
|
||||
is_optional = date_field['optional']
|
||||
|
||||
# Add a comment for clarity
|
||||
func_lines.append(f" // Convert {field_name} from ISO string to Date")
|
||||
|
||||
if is_optional:
|
||||
func_lines.append(f" {field_name}: data.{field_name} ? new Date(data.{field_name}) : undefined,")
|
||||
else:
|
||||
func_lines.append(f" {field_name}: new Date(data.{field_name}),")
|
||||
|
||||
func_lines.extend([
|
||||
f" }};",
|
||||
f"}}"
|
||||
])
|
||||
|
||||
conversion_functions.append('\n'.join(func_lines))
|
||||
|
||||
if not conversion_functions:
|
||||
return ""
|
||||
|
||||
# Generate the conversion functions section
|
||||
result = [
|
||||
"// ============================",
|
||||
"// Date Conversion Functions",
|
||||
"// ============================",
|
||||
"",
|
||||
"// These functions convert API responses to properly typed objects",
|
||||
"// with Date objects instead of ISO date strings",
|
||||
"",
|
||||
]
|
||||
|
||||
result.extend(conversion_functions)
|
||||
result.append("")
|
||||
|
||||
# Generate a generic converter function
|
||||
models_with_dates = [interface['name'] for interface in interfaces if interface.get('date_fields')]
|
||||
|
||||
if models_with_dates:
|
||||
result.extend([
|
||||
"/**",
|
||||
" * Generic converter that automatically selects the right conversion function",
|
||||
" * based on the model type",
|
||||
" */",
|
||||
"export function convertFromApi<T>(data: any, modelType: string): T {",
|
||||
" if (!data) return data;",
|
||||
" ",
|
||||
" switch (modelType) {"
|
||||
])
|
||||
|
||||
for model_name in models_with_dates:
|
||||
result.append(f" case '{model_name}':")
|
||||
result.append(f" return convert{model_name}FromApi(data) as T;")
|
||||
|
||||
result.extend([
|
||||
" default:",
|
||||
" return data as T;",
|
||||
" }",
|
||||
"}",
|
||||
"",
|
||||
"/**",
|
||||
" * Convert array of items using the appropriate converter",
|
||||
" */",
|
||||
"export function convertArrayFromApi<T>(data: any[], modelType: string): T[] {",
|
||||
" if (!data || !Array.isArray(data)) return data;",
|
||||
" return data.map(item => convertFromApi<T>(item, modelType));",
|
||||
"}",
|
||||
""
|
||||
])
|
||||
|
||||
return '\n'.join(result)
|
||||
|
||||
def generate_conversion_functions(interfaces: List[Dict[str, Any]]) -> str:
|
||||
"""Generate TypeScript conversion functions for models with date fields"""
|
||||
conversion_functions = []
|
||||
@ -935,10 +707,8 @@ def generate_typescript_interfaces(source_file: str, debug: bool = False):
|
||||
continue
|
||||
|
||||
total_date_fields = sum(len(interface.get('date_fields', [])) for interface in interfaces)
|
||||
total_default_fields = sum(len(interface.get('default_fields', [])) for interface in interfaces)
|
||||
print(f"\n📊 Found {len(interfaces)} interfaces and {len(enums)} enums")
|
||||
print(f"🗓️ Found {total_date_fields} date fields across all models")
|
||||
print(f"🎯 Found {total_default_fields} fields with default values across all models")
|
||||
|
||||
# Generate TypeScript content
|
||||
ts_content = f"""// Generated TypeScript types from Pydantic models
|
||||
@ -972,11 +742,6 @@ def generate_typescript_interfaces(source_file: str, debug: bool = False):
|
||||
|
||||
ts_content += "}\n\n"
|
||||
|
||||
# Add default objects
|
||||
default_objects = generate_default_objects(interfaces)
|
||||
if default_objects:
|
||||
ts_content += default_objects
|
||||
|
||||
# Add conversion functions
|
||||
conversion_functions = generate_conversion_functions(interfaces)
|
||||
if conversion_functions:
|
||||
@ -1016,7 +781,7 @@ def compile_typescript(ts_file: str) -> bool:
|
||||
def main():
|
||||
"""Main function with command line argument parsing"""
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Generate TypeScript types from Pydantic models with date conversion functions, default objects, and proper enum handling',
|
||||
description='Generate TypeScript types from Pydantic models with date conversion functions and proper enum handling',
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
epilog="""
|
||||
Examples:
|
||||
@ -1031,12 +796,8 @@ Generated conversion functions can be used like:
|
||||
const candidate = convertCandidateFromApi(apiResponse);
|
||||
const jobs = convertArrayFromApi<Job>(apiResponse, 'Job');
|
||||
|
||||
Generated default objects can be used like:
|
||||
const message: ChatMessage = { ...DefaultChatMessage, sessionId: '123', content: 'Hello' };
|
||||
const overrideMessage: ChatMessage = { ...DefaultChatMessage, status: 'error' };
|
||||
|
||||
Enum fields now properly support all enum values:
|
||||
status: ChatStatusType = ChatStatusType.DONE -> status: "pending" | "processing" | "done" | "error"
|
||||
Enum types are now properly handled:
|
||||
status: ChatStatusType = ChatStatusType.DONE -> status: ChatStatusType (not locked to "done")
|
||||
"""
|
||||
)
|
||||
|
||||
@ -1073,12 +834,12 @@ Enum fields now properly support all enum values:
|
||||
parser.add_argument(
|
||||
'--version', '-v',
|
||||
action='version',
|
||||
version='TypeScript Generator 3.3 (with Default Objects and Fixed Enum Handling)'
|
||||
version='TypeScript Generator 3.2 (Fixed Enum Default Handling)'
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
print("🚀 Enhanced TypeScript Type Generator with Default Objects and Fixed Enum Handling")
|
||||
print("🚀 Enhanced TypeScript Type Generator with Fixed Enum Handling")
|
||||
print("=" * 60)
|
||||
print(f"📁 Source file: {args.source}")
|
||||
print(f"📁 Output file: {args.output}")
|
||||
@ -1123,37 +884,27 @@ Enum fields now properly support all enum values:
|
||||
|
||||
# Count conversion functions and provide detailed feedback
|
||||
conversion_count = ts_content.count('export function convert') - ts_content.count('convertFromApi') - ts_content.count('convertArrayFromApi')
|
||||
default_objects_count = ts_content.count('export const Default')
|
||||
enum_union_count = ts_content.count(' | ')
|
||||
enum_type_count = ts_content.count('export type')
|
||||
|
||||
if conversion_count > 0:
|
||||
print(f"🗓️ Generated {conversion_count} date conversion functions")
|
||||
if default_objects_count > 0:
|
||||
print(f"🎯 Generated {default_objects_count} default objects")
|
||||
if enum_union_count > 0:
|
||||
print(f"🔗 Generated {enum_union_count} union types (including proper enum types)")
|
||||
if enum_type_count > 0:
|
||||
print(f"🎯 Generated {enum_type_count} enum types (properly allowing all values)")
|
||||
|
||||
if args.debug:
|
||||
# Show which models have date conversion
|
||||
models_with_dates = []
|
||||
models_with_defaults = []
|
||||
for line in ts_content.split('\n'):
|
||||
if line.startswith('export function convert') and 'FromApi' in line and 'convertFromApi' not in line:
|
||||
model_name = line.split('convert')[1].split('FromApi')[0]
|
||||
models_with_dates.append(model_name)
|
||||
elif line.startswith('export const Default'):
|
||||
model_name = line.split('export const Default')[1].split(':')[0]
|
||||
models_with_defaults.append(model_name)
|
||||
|
||||
if models_with_dates:
|
||||
print(f" Models with date conversion: {', '.join(models_with_dates)}")
|
||||
if models_with_defaults:
|
||||
print(f" Models with default objects: {', '.join(models_with_defaults)}")
|
||||
|
||||
# Provide troubleshooting info if debug mode
|
||||
if args.debug:
|
||||
print(f"\n🐛 Debug mode was enabled. If you see incorrect type conversions:")
|
||||
print(f" 1. Look for '🎯 Enum type detected' lines to verify enum handling")
|
||||
print(f" 1. Check the debug output above for enum default handling")
|
||||
print(f" 2. Look for '📅 Date type check' lines for date handling")
|
||||
print(f" 3. Look for '⚠️' warnings about fallback types")
|
||||
print(f" 4. Verify your Pydantic model field types and defaults are correct")
|
||||
@ -1174,24 +925,19 @@ Enum fields now properly support all enum values:
|
||||
print(f"✅ File size: {file_size} characters")
|
||||
if conversion_count > 0:
|
||||
print(f"✅ Date conversion functions: {conversion_count}")
|
||||
if default_objects_count > 0:
|
||||
print(f"✅ Default objects: {default_objects_count}")
|
||||
if enum_union_count > 0:
|
||||
print(f"✅ Union types (proper enum support): {enum_union_count}")
|
||||
if enum_type_count > 0:
|
||||
print(f"✅ Enum types (with full value range): {enum_type_count}")
|
||||
if not args.skip_test:
|
||||
print("✅ Model validation passed")
|
||||
if not args.skip_compile:
|
||||
print("✅ TypeScript syntax validated")
|
||||
|
||||
print(f"\n💡 Usage in your TypeScript project:")
|
||||
print(f" import {{ ChatMessage, ChatStatusType, DefaultChatMessage, convertChatMessageFromApi }} from './{Path(args.output).stem}';")
|
||||
print(f" const message: ChatMessage = {{ ...DefaultChatMessage, sessionId: '123', content: 'Hello' }};")
|
||||
print(f" import {{ Candidate, Employer, Job, convertCandidateFromApi }} from './{Path(args.output).stem}';")
|
||||
if conversion_count > 0:
|
||||
print(f" const message = convertChatMessageFromApi(apiResponse);")
|
||||
print(f" const messages = convertArrayFromApi<ChatMessage>(apiResponse, 'ChatMessage');")
|
||||
if default_objects_count > 0:
|
||||
print(f" const overrideMessage: ChatMessage = {{ ...DefaultChatMessage, status: 'error' }};")
|
||||
|
||||
print(f" const candidate = convertCandidateFromApi(apiResponse);")
|
||||
print(f" const jobs = convertArrayFromApi<Job>(apiResponse, 'Job');")
|
||||
|
||||
return True
|
||||
|
||||
except KeyboardInterrupt:
|
||||
|
@ -279,6 +279,12 @@ async def get_database() -> RedisDatabase:
|
||||
"""
|
||||
return db_manager.get_database()
|
||||
|
||||
async def get_last_item(generator):
    """Drain an async generator and return the final item it yielded.

    Returns None when the generator produces no items.
    """
    final = None
    async for item in generator:
        final = item
    return final
|
||||
|
||||
def create_success_response(data: Any, meta: Optional[Dict] = None) -> Dict:
|
||||
return {
|
||||
"success": True,
|
||||
@ -3050,7 +3056,7 @@ async def post_chat_session_message_stream(
|
||||
status_code=404,
|
||||
content=create_error_response("CANDIDATE_NOT_FOUND", "Candidate not found for this chat session")
|
||||
)
|
||||
logger.info(f"🔗 User {current_user.id} posting message to chat session {user_message.session_id} with query: {user_message.content}")
|
||||
logger.info(f"🔗 User {current_user.id} posting message to chat session {user_message.session_id} with query length: {len(user_message.content)}")
|
||||
|
||||
async with entities.get_candidate_entity(candidate=candidate) as candidate_entity:
|
||||
# Entity automatically released when done
|
||||
@ -3343,6 +3349,68 @@ async def reset_chat_session(
|
||||
content=create_error_response("RESET_ERROR", str(e))
|
||||
)
|
||||
|
||||
@api_router.post("/candidates/{candidate_id}/skill-match")
async def get_candidate_skill_match(
    candidate_id: str = Path(...),
    requirement: str = Body(...),
    current_user = Depends(get_current_user),
    database: RedisDatabase = Depends(get_database)
):
    """Get skill match for a candidate against a requirement.

    Loads the candidate, runs the SKILL_MATCH agent over the requirement
    text, and returns the agent's final message content as the match.
    Responses: 404 if the candidate is unknown, 400 if no skill-match
    agent can be created, 500 on agent failure or unexpected errors.
    """
    try:
        # Find candidate by ID
        candidate_data = await database.get_candidate(candidate_id)
        if not candidate_data:
            return JSONResponse(
                status_code=404,
                content=create_error_response("CANDIDATE_NOT_FOUND", f"Candidate with ID '{candidate_id}' not found")
            )

        candidate = Candidate.model_validate(candidate_data)

        logger.info(f"🔍 Running skill match for candidate {candidate.id} against requirement: {requirement}")
        async with entities.get_candidate_entity(candidate=candidate) as candidate_entity:
            agent = candidate_entity.get_or_create_agent(agent_type=ChatContextType.SKILL_MATCH)
            if not agent:
                return JSONResponse(
                    status_code=400,
                    content=create_error_response("AGENT_NOT_FOUND", "No skill match agent found for this candidate")
                )
            # Entity automatically released when done
            # Only the last streamed message matters here: generate() is an
            # async generator and the final yield is taken as the result.
            skill_match = await get_last_item(
                agent.generate(
                    llm=llm_manager.get_llm(),
                    model=defines.model,
                    user_message=ChatMessageUser(
                        # NOTE(review): session_id is intentionally empty —
                        # assumes the agent tolerates a session-less message;
                        # confirm against ChatMessageUser validation.
                        sender_id=candidate.id,
                        session_id="",
                        content=requirement,
                        timestamp=datetime.now(UTC)
                    ),
                    user=candidate,
                )
            )
            if skill_match is None:
                return JSONResponse(
                    status_code=500,
                    content=create_error_response("NO_MATCH", "No skill match found for the given requirement")
                )
            # Collapse the message object to its text content for the response.
            skill_match = skill_match.content.strip()
            logger.info(f"✅ Skill match found for candidate {candidate.id}: {skill_match}")

            return create_success_response({
                "candidateId": candidate.id,
                "skillMatch": skill_match
            })

    except Exception as e:
        # Log the full traceback before returning an opaque 500 to the client.
        logger.error(traceback.format_exc())
        logger.error(f"❌ Get candidate skill match error: {e}")
        return JSONResponse(
            status_code=500,
            content=create_error_response("SKILL_MATCH_ERROR", str(e))
        )
|
||||
|
||||
@api_router.get("/candidates/{username}/chat-sessions")
|
||||
async def get_candidate_chat_sessions(
|
||||
username: str = Path(...),
|
||||
|
@ -71,8 +71,51 @@ class InterviewRecommendation(str, Enum):
|
||||
class ChatSenderType(str, Enum):
    """Who authored a chat message (serialized as its string value)."""
    USER = "user"
    ASSISTANT = "assistant"
    AGENT = "agent"
    SYSTEM = "system"
||||
|
||||
class Requirements(BaseModel):
    """Required vs. preferred requirement strings for one category of a
    job posting (e.g. technical skills, experience)."""
    required: List[str] = Field(default_factory=list)
    preferred: List[str] = Field(default_factory=list)

    @model_validator(mode='before')
    def validate_requirements(cls, values):
        # Reject non-dict payloads early with a clearer message than the
        # default pydantic error.
        # NOTE(review): this also rejects validating from an existing
        # Requirements instance (model_validate on a model passes the
        # instance, not a dict) — confirm all callers supply plain dicts.
        if not isinstance(values, dict):
            raise ValueError("Requirements must be a dictionary with 'required' and 'preferred' keys.")
        return values
|
||||
|
||||
class Citation(BaseModel):
    """Evidence snippet backing a skill-match assessment.

    text: the quoted passage; source: where it came from;
    relevance: 0-100 score of how strongly it supports the assessment.
    """
    text: str
    source: str
    # The comment promised a 0-100 scale but any int was accepted;
    # enforce the documented range at validation time.
    relevance: int = Field(..., ge=0, le=100)  # 0-100 scale
|
||||
|
||||
class SkillStatus(str, Enum):
    """Lifecycle of a skill-match computation — mirrors the frontend's
    'pending' | 'complete' | 'error' union."""
    PENDING = "pending"
    COMPLETE = "complete"
    ERROR = "error"
|
||||
|
||||
class SkillMatch(BaseModel):
    """Assessment of a candidate against a single job requirement.

    Serialized with camelCase aliases for the TypeScript client
    (populate_by_name allows both snake_case and camelCase input).
    """
    requirement: str
    status: SkillStatus
    # matchScore is a 0-100 scale on the frontend; enforce the documented
    # range instead of accepting any int.
    match_score: int = Field(..., ge=0, le=100, alias='matchScore')
    assessment: str
    citations: List[Citation] = Field(default_factory=list)
    model_config = {
        "populate_by_name": True  # Allow both field names and aliases
    }
|
||||
|
||||
class JobRequirements(BaseModel):
    """Structured requirements extracted from a job description.

    Serialized with camelCase aliases for the TypeScript client
    (populate_by_name allows both snake_case and camelCase input).
    """
    technical_skills: Requirements = Field(..., alias="technicalSkills")
    experience_requirements: Requirements = Field(..., alias="experienceRequirements")
    soft_skills: Optional[List[str]] = Field(default_factory=list, alias="softSkills")
    # Use default_factory for mutable list defaults, consistent with the
    # other list fields in this module (avoids a shared `[]` literal).
    experience: Optional[List[str]] = Field(default_factory=list)
    education: Optional[List[str]] = Field(default_factory=list)
    certifications: Optional[List[str]] = Field(default_factory=list)
    preferred_attributes: Optional[List[str]] = Field(None, alias="preferredAttributes")
    model_config = {
        "populate_by_name": True  # Allow both field names and aliases
    }
|
||||
|
||||
class ChatMessageType(str, Enum):
|
||||
ERROR = "error"
|
||||
GENERATING = "generating"
|
||||
@ -106,6 +149,7 @@ class ChatContextType(str, Enum):
|
||||
GENERATE_PROFILE = "generate_profile"
|
||||
GENERATE_IMAGE = "generate_image"
|
||||
RAG_SEARCH = "rag_search"
|
||||
SKILL_MATCH = "skill_match"
|
||||
|
||||
class AIModelType(str, Enum):
|
||||
QWEN2_5 = "qwen2.5"
|
||||
@ -710,20 +754,24 @@ class ChatOptions(BaseModel):
|
||||
seed: Optional[int] = 8911
|
||||
num_ctx: Optional[int] = Field(default=None, alias="numCtx") # Number of context tokens
|
||||
temperature: Optional[float] = Field(default=0.7) # Higher temperature to encourage tool usage
|
||||
model_config = {
|
||||
"populate_by_name": True # Allow both field names and aliases
|
||||
}
|
||||
|
||||
|
||||
class LLMMessage(BaseModel):
    """A single message in the payload sent to the LLM backend.

    tool_calls is excluded from serialization (backend-internal only).
    """
    role: str = Field(default="")
    content: str = Field(default="")
    # Use default_factory instead of a mutable literal default; the diff
    # also shows a stale `default={}` variant (a dict default on a List
    # field), which default_factory=list resolves cleanly.
    tool_calls: Optional[List[Dict]] = Field(default_factory=list, exclude=True)
|
||||
|
||||
|
||||
class ChatMessageBase(BaseModel):
|
||||
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
|
||||
session_id: str = Field(..., alias="sessionId")
|
||||
sender_id: Optional[str] = Field(None, alias="senderId")
|
||||
status: ChatStatusType = ChatStatusType.INITIALIZING
|
||||
type: ChatMessageType = ChatMessageType.PREPARING
|
||||
sender: ChatSenderType = ChatSenderType.SYSTEM
|
||||
status: ChatStatusType #= ChatStatusType.INITIALIZING
|
||||
type: ChatMessageType #= ChatMessageType.PREPARING
|
||||
sender: ChatSenderType #= ChatSenderType.SYSTEM
|
||||
timestamp: datetime = Field(default_factory=lambda: datetime.now(UTC), alias="timestamp")
|
||||
tunables: Optional[Tunables] = None
|
||||
content: str = ""
|
||||
@ -759,8 +807,8 @@ class ChatMessageMetaData(BaseModel):
|
||||
}
|
||||
|
||||
class ChatMessageUser(ChatMessageBase):
    # NOTE(review): this commit changes the defaults from DONE/USER to
    # INITIALIZING/GENERATING. A user-authored message defaulting to
    # type GENERATING looks suspicious — the frontend's default user
    # message still uses type "preparing" / status "done". Confirm this
    # change is intentional before shipping.
    status: ChatStatusType = ChatStatusType.INITIALIZING
    type: ChatMessageType = ChatMessageType.GENERATING
    sender: ChatSenderType = ChatSenderType.USER
||||
|
||||
class ChatMessage(ChatMessageBase):
|
||||
|
@ -473,6 +473,7 @@ class ChromaDBFileWatcher(FileSystemEventHandler):
|
||||
logging.error(chunk)
|
||||
|
||||
def prepare_metadata(self, meta: Dict[str, Any], buffer=defines.chunk_buffer)-> str | None:
|
||||
source_file = meta.get("source_file")
|
||||
try:
|
||||
source_file = meta["source_file"]
|
||||
path_parts = source_file.split(os.sep)
|
||||
@ -487,7 +488,7 @@ class ChromaDBFileWatcher(FileSystemEventHandler):
|
||||
meta["chunk_end"] = end
|
||||
return "".join(lines[start:end])
|
||||
except:
|
||||
logging.warning(f"Unable to open {meta["source_file"]}")
|
||||
logging.warning(f"Unable to open {source_file}")
|
||||
return None
|
||||
|
||||
# Cosine Distance Equivalent Similarity Retrieval Characteristics
|
||||
|
Loading…
x
Reference in New Issue
Block a user