Reformatting with ruff

James Ketr 2025-06-18 13:30:54 -07:00
parent f53ff967cb
commit f1c2e16389
26 changed files with 174 additions and 255 deletions
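The commit message does not record the exact command that produced these changes. As a rough sketch (assuming ruff is installed and the commands are run from the repository root), a formatting pass plus an autofix pass that removes unused imports and extraneous f-string prefixes could be driven from Python like this; the invocation shown is an assumption, not taken from the commit:

import subprocess

# Reformat all files in place (hypothetical invocation).
subprocess.run(["ruff", "format", "."], check=True)
# Apply safe autofixes, e.g. unused imports and stray f-prefixes.
subprocess.run(["ruff", "check", ".", "--fix"], check=True)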

View File

@@ -1,5 +1,5 @@
from __future__ import annotations
-from pydantic import BaseModel, Field, model_validator # type: ignore
+from pydantic import BaseModel, Field # type: ignore
from typing import (
Literal,
get_args,
@@ -13,10 +13,10 @@ import time
import re
from abc import ABC
from datetime import datetime, UTC
-from prometheus_client import Counter, Summary, CollectorRegistry # type: ignore
+from prometheus_client import CollectorRegistry # type: ignore
import numpy as np # type: ignore
import json_extractor as json_extractor
-from pydantic import BaseModel, Field, model_validator # type: ignore
+from pydantic import BaseModel, Field # type: ignore
from uuid import uuid4
from typing import List, Optional, ClassVar, Any, Literal
@@ -24,7 +24,7 @@ from datetime import datetime, UTC
import numpy as np # type: ignore
from uuid import uuid4
-from prometheus_client import CollectorRegistry, Counter # type: ignore
+from prometheus_client import CollectorRegistry # type: ignore
import os
import re
from pathlib import Path
@@ -509,7 +509,6 @@ Content: {content}
return
results : List[ChromaDBGetResponse] = []
-entries: int = 0
user: CandidateEntity = self.user
for rag in user.rags:
if not rag.enabled:
@@ -594,7 +593,7 @@ Content: {content}
status_message = ChatMessageStatus(
session_id=session_id,
activity=ApiActivityType.GENERATING,
-content=f"Generating response..."
+content="Generating response..."
)
yield status_message
@@ -675,7 +674,6 @@ Content: {content}
session_id=session_id,
content=prompt,
)
-user = self.user
self.user.metrics.generate_count.labels(agent=self.agent_type).inc()
with self.user.metrics.generate_duration.labels(agent=self.agent_type).time():
@@ -723,7 +721,6 @@ Content: {content}
LLMMessage(role="user", content=prompt)
)
-llm_history = messages
# use_tools = message.tunables.enable_tools and len(self.context.tools) > 0
# message.metadata.tools = {
@@ -829,7 +826,7 @@ Content: {content}
status_message = ChatMessageStatus(
session_id=session_id,
activity=ApiActivityType.GENERATING,
-content=f"Generating response..."
+content="Generating response..."
)
yield status_message

View File

@@ -8,7 +8,7 @@ from .registry import agent_registry
from models import ( ApiMessage, Tunables, ApiStatusType)
-system_message = f"""
+system_message = """
When answering queries, follow these steps:
- When any content from <|context|> is relevant, synthesize information from all sources to provide the most complete answer.

View File

@@ -1,12 +1,9 @@
from __future__ import annotations
from typing import (
-Dict,
Literal,
ClassVar,
-cast,
Any,
AsyncGenerator,
-List,
Optional
# override
) # NOTE: You must import Optional for late binding to work

View File

@@ -1,10 +1,9 @@
from __future__ import annotations
-from pydantic import model_validator, Field, BaseModel # type: ignore
+from pydantic import Field # type: ignore
from typing import (
Dict,
Literal,
ClassVar,
-cast,
Any,
Tuple,
AsyncGenerator,
@@ -19,7 +18,6 @@ import time
import time
import os
import random
-from names_dataset import NameDataset, NameWrapper # type: ignore
from .base import Agent, agent_registry
from models import ApiActivityType, ChatMessage, ChatMessageError, ApiMessageType, ChatMessageStatus, ChatMessageStreaming, ApiStatusType, Tunables
@@ -339,7 +337,7 @@ Incorporate the following into the job description: {original_prompt}
#
# Generate the persona
#
-logger.info(f"🤖 Generating persona...")
+logger.info("🤖 Generating persona...")
generating_message = None
async for generating_message in self.llm_one_shot(
llm=llm, model=model,

View File

@@ -1,17 +1,13 @@
from __future__ import annotations
-from pydantic import model_validator, Field # type: ignore
from typing import (
-Dict,
Literal,
ClassVar,
Any,
AsyncGenerator,
-List,
+List
-Optional
# override
) # NOTE: You must import Optional for late binding to work
import json
-import numpy as np # type: ignore
from logger import logger
from .base import Agent, agent_registry
@@ -172,7 +168,7 @@ Format it in clean, ATS-friendly markdown. Provide ONLY the resume with no comme
# Stage 1A: Analyze job requirements
status_message = ChatMessageStatus(
session_id=session_id,
-content = f"Analyzing job requirements",
+content = "Analyzing job requirements",
activity=ApiActivityType.THINKING
)
yield status_message
@@ -215,7 +211,7 @@ Format it in clean, ATS-friendly markdown. Provide ONLY the resume with no comme
system_prompt=system_prompt,
)
yield resume_message
-logger.info(f"✅ Resume generation completed successfully.")
+logger.info("✅ Resume generation completed successfully.")
return
# Register the base agent

View File

@@ -1,18 +1,15 @@
from __future__ import annotations
-from pydantic import model_validator, Field # type: ignore
from typing import (
Dict,
Literal,
ClassVar,
Any,
AsyncGenerator,
-List,
Optional
# override
) # NOTE: You must import Optional for late binding to work
import inspect
import json
-import numpy as np # type: ignore
from .base import Agent, agent_registry
from models import ApiActivityType, ApiMessage, ChatMessage, ChatMessageError, ChatMessageStatus, ChatMessageStreaming, ApiStatusType, Job, JobRequirements, JobRequirementsMessage, Tunables
@@ -163,7 +160,7 @@ Avoid vague categorizations and be precise about whether skills are explicitly r
# Stage 1A: Analyze job requirements
status_message = ChatMessageStatus(
session_id=session_id,
-content = f"Analyzing job requirements",
+content = "Analyzing job requirements",
activity=ApiActivityType.THINKING
)
yield status_message
@@ -238,7 +235,7 @@ Avoid vague categorizations and be precise about whether skills are explicitly r
job=job,
)
yield job_requirements_message
-logger.info(f"✅ Job requirements analysis completed successfully.")
+logger.info("✅ Job requirements analysis completed successfully.")
return
# Register the base agent

View File

@@ -1,17 +1,13 @@
from __future__ import annotations
-from pydantic import model_validator, Field # type: ignore
from typing import (
-Dict,
Literal,
ClassVar,
Any,
AsyncGenerator,
-List,
Optional
# override
) # NOTE: You must import Optional for late binding to work
import json
-import numpy as np # type: ignore
from .base import Agent, agent_registry
from models import (ApiMessage, ChatMessage, ChatMessageError, ChatMessageRagSearch, ChatMessageSkillAssessment, ApiStatusType, EvidenceDetail,
@@ -233,7 +229,7 @@ JSON RESPONSE:"""
skill_assessment=skill_assessment,
)
yield skill_assessment_message
-logger.info(f"✅ Skill assessment completed successfully.")
+logger.info("✅ Skill assessment completed successfully.")
return
# Register the base agent

View File

@@ -5,10 +5,6 @@ import logging
import os
from datetime import datetime, UTC
import asyncio
-from models import (
-# User models
-Candidate, Employer, BaseUser, EvidenceDetail, Guest, Authentication, AuthResponse, SkillAssessment,
-)
from .core import RedisDatabase
logger = logging.getLogger(__name__)

View File

@@ -374,7 +374,7 @@ class AuthMixin(DatabaseProtocol):
token_data["is_revoked"] = True
token_data["revoked_at"] = datetime.now(timezone.utc).isoformat()
await self.redis.set(key, json.dumps(token_data, default=str))
-logger.info(f"🔐 Revoked refresh token")
+logger.info("🔐 Revoked refresh token")
return True
return False
except Exception as e:
@@ -457,7 +457,7 @@ class AuthMixin(DatabaseProtocol):
token_data["used"] = True
token_data["used_at"] = datetime.now(timezone.utc).isoformat()
await self.redis.set(key, json.dumps(token_data, default=str))
-logger.info(f"🔐 Marked password reset token as used")
+logger.info("🔐 Marked password reset token as used")
return True
return False
except Exception as e:

View File

@@ -13,7 +13,6 @@ class DeviceManager:
def generate_device_fingerprint(self, request: Request) -> str:
"""Generate device fingerprint from request"""
user_agent = request.headers.get("user-agent", "")
-ip_address = request.client.host if request.client else "unknown"
accept_language = request.headers.get("accept-language", "")
# Create fingerprint

View File

@@ -3,7 +3,7 @@ import weakref
from datetime import datetime, timedelta
from typing import Dict, Optional
from contextlib import asynccontextmanager
-from pydantic import BaseModel, Field # type: ignore
+from pydantic import BaseModel # type: ignore
from models import Candidate
from agents.base import CandidateEntity

View File

@@ -23,17 +23,18 @@ def test_model_creation():
# Create candidate
candidate = Candidate(
email="test@example.com",
+user_type=UserType.CANDIDATE,
username="test_candidate",
-createdAt=datetime.now(),
+created_at=datetime.now(),
-updatedAt=datetime.now(),
+updated_at=datetime.now(),
status=UserStatus.ACTIVE,
-firstName="John",
+first_name="John",
-lastName="Doe",
+last_name="Doe",
-fullName="John Doe",
+full_name="John Doe",
skills=[skill],
experience=[],
education=[],
-preferredJobTypes=[EmploymentType.FULL_TIME],
+preferred_job_types=[EmploymentType.FULL_TIME],
location=location,
languages=[],
certifications=[]
@@ -41,18 +42,18 @@ def test_model_creation():
# Create employer
employer = Employer(
-firstName="Mary",
+user_type=UserType.EMPLOYER,
-lastName="Smith",
+first_name="Mary",
-fullName="Mary Smith",
+last_name="Smith",
+full_name="Mary Smith",
email="hr@company.com",
-username="test_employer",
+created_at=datetime.now(),
-createdAt=datetime.now(),
+updated_at=datetime.now(),
-updatedAt=datetime.now(),
status=UserStatus.ACTIVE,
-companyName="Test Company",
+company_name="Test Company",
industry="Technology",
-companySize="50-200",
+company_size="50-200",
-companyDescription="A test company",
+company_description="A test company",
location=location
)
@@ -84,8 +85,8 @@ def test_json_api_format():
assert candidate_back.first_name == candidate.first_name
assert employer_back.company_name == employer.company_name
-print(f"✅ JSON round-trip successful")
+print("✅ JSON round-trip successful")
-print(f"✅ Data integrity verified")
+print("✅ Data integrity verified")
return True
@@ -105,8 +106,8 @@ def test_api_dict_format():
assert "createdAt" in candidate_dict
assert "companyName" in employer_dict
-print(f"✅ API format dictionaries created")
+print("✅ API format dictionaries created")
-print(f"✅ CamelCase aliases verified")
+print("✅ CamelCase aliases verified")
# Test deserializing from API format
candidate_back = Candidate.model_validate(candidate_dict)
@@ -115,7 +116,7 @@ def test_api_dict_format():
assert candidate_back.email == candidate.email
assert employer_back.company_name == employer.company_name
-print(f"✅ API format round-trip successful")
+print("✅ API format round-trip successful")
return True
@@ -125,15 +126,16 @@ def test_validation_constraints():
try:
# Create a candidate with invalid email
-invalid_candidate = Candidate(
+Candidate(
+user_type=UserType.CANDIDATE,
email="invalid-email",
username="test_invalid",
-createdAt=datetime.now(),
+created_at=datetime.now(),
-updatedAt=datetime.now(),
+updated_at=datetime.now(),
status=UserStatus.ACTIVE,
-firstName="Jane",
+first_name="Jane",
-lastName="Doe",
+last_name="Doe",
-fullName="Jane Doe"
+full_name="Jane Doe"
)
print("❌ Validation should have failed but didn't")
return False
@@ -155,7 +157,7 @@ def test_enum_values():
assert candidate_dict["userType"] == "candidate"
assert employer.user_type == UserType.EMPLOYER
-print(f"✅ Enum values correctly serialized")
+print("✅ Enum values correctly serialized")
print(f"✅ User types: candidate={candidate.user_type}, employer={employer.user_type}")
return True
@@ -172,7 +174,7 @@ def main():
test_validation_constraints()
test_enum_values()
-print(f"\n🎉 All focused tests passed!")
+print("\n🎉 All focused tests passed!")
print("=" * 40)
print("✅ Models work correctly")
print("✅ JSON API format works")

View File

@@ -48,7 +48,7 @@ def flux_worker(pipe: Any, params: ImageRequest, status_queue: queue.Queue, task
# Flux: Run generation in the background and yield progress updates
status_queue.put(ChatMessageStatus(
session_id=params.session_id,
-content=f"Initializing image generation.",
+content="Initializing image generation.",
activity=ApiActivityType.GENERATING_IMAGE,
))
@@ -224,7 +224,7 @@ async def generate_image(request: ImageRequest) -> AsyncGenerator[ChatMessage, N
return
filedir = os.path.dirname(request.filepath)
-filename = os.path.basename(request.filepath)
+os.path.basename(request.filepath)
os.makedirs(filedir, exist_ok=True)
model_type = "flux"
@@ -233,11 +233,11 @@ async def generate_image(request: ImageRequest) -> AsyncGenerator[ChatMessage, N
# Get initial time estimate, scaled by resolution
estimates = TIME_ESTIMATES[model_type][device]
resolution_scale = (request.height * request.width) / (512 * 512)
-estimated_total = estimates["load"] + estimates["per_step"] * request.iterations * resolution_scale
+estimates["load"] + estimates["per_step"] * request.iterations * resolution_scale
# Initialize or get cached pipeline
start_time = time.time()
-yield status(session_id, f"Loading generative image model...")
+yield status(session_id, "Loading generative image model...")
pipe = await model_cache.get_pipeline(request.model, device)
load_time = time.time() - start_time
yield status(session_id, f"Model loaded in {load_time:.1f} seconds.",)

View File

@@ -289,7 +289,7 @@ class MFAData(BaseModel):
class MFARequestResponse(BaseModel):
mfa_required: bool = Field(..., alias=str("mfaRequired"))
-mfa_data: Optional[MFAData] = Field(None, alias=str("mfaData"))
+mfa_data: Optional[MFAData] = Field(default=None, alias=str("mfaData"))
model_config = ConfigDict(populate_by_name=True)
class ResendVerificationRequest(BaseModel):
@@ -312,11 +312,11 @@ class Location(BaseModel):
city: str
state: Optional[str] = None
country: str
-postal_code: Optional[str] = Field(None, alias=str("postalCode"))
+postal_code: Optional[str] = Field(default=None, alias=str("postalCode"))
latitude: Optional[float] = None
longitude: Optional[float] = None
remote: Optional[bool] = None
-hybrid_options: Optional[List[str]] = Field(None, alias=str("hybridOptions"))
+hybrid_options: Optional[List[str]] = Field(default=None, alias=str("hybridOptions"))
address: Optional[str] = None
class Skill(BaseModel):
@@ -324,14 +324,14 @@ class Skill(BaseModel):
name: str
category: str
level: SkillLevel
-years_of_experience: Optional[int] = Field(None, alias=str("yearsOfExperience"))
+years_of_experience: Optional[int] = Field(default=None, alias=str("yearsOfExperience"))
class WorkExperience(BaseModel):
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
company_name: str = Field(..., alias=str("companyName"))
position: str
start_date: datetime = Field(..., alias=str("startDate"))
-end_date: Optional[datetime] = Field(None, alias=str("endDate"))
+end_date: Optional[datetime] = Field(default=None, alias=str("endDate"))
is_current: bool = Field(..., alias=str("isCurrent"))
description: str
skills: List[str]
@@ -344,7 +344,7 @@ class Education(BaseModel):
degree: str
field_of_study: str = Field(..., alias=str("fieldOfStudy"))
start_date: datetime = Field(..., alias=str("startDate"))
-end_date: Optional[datetime] = Field(None, alias=str("endDate"))
+end_date: Optional[datetime] = Field(default=None, alias=str("endDate"))
is_current: bool = Field(..., alias=str("isCurrent"))
gpa: Optional[float] = None
achievements: Optional[List[str]] = None
@@ -359,9 +359,9 @@ class Certification(BaseModel):
name: str
issuing_organization: str = Field(..., alias=str("issuingOrganization"))
issue_date: datetime = Field(..., alias=str("issueDate"))
-expiration_date: Optional[datetime] = Field(None, alias=str("expirationDate"))
+expiration_date: Optional[datetime] = Field(default=None, alias=str("expirationDate"))
-credential_id: Optional[str] = Field(None, alias=str("credentialId"))
+credential_id: Optional[str] = Field(default=None, alias=str("credentialId"))
-credential_url: Optional[HttpUrl] = Field(None, alias=str("credentialUrl"))
+credential_url: Optional[HttpUrl] = Field(default=None, alias=str("credentialUrl"))
class SocialLink(BaseModel):
platform: SocialPlatform
@@ -391,7 +391,7 @@ class RefreshToken(BaseModel):
device: str
ip_address: str = Field(..., alias=str("ipAddress"))
is_revoked: bool = Field(..., alias=str("isRevoked"))
-revoked_reason: Optional[str] = Field(None, alias=str("revokedReason"))
+revoked_reason: Optional[str] = Field(default=None, alias=str("revokedReason"))
class Attachment(BaseModel):
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
@@ -401,8 +401,8 @@ class Attachment(BaseModel):
file_url: str = Field(..., alias=str("fileUrl"))
uploaded_at: datetime = Field(..., alias=str("uploadedAt"))
is_processed: bool = Field(..., alias=str("isProcessed"))
-processing_result: Optional[Any] = Field(None, alias=str("processingResult"))
+processing_result: Optional[Any] = Field(default=None, alias=str("processingResult"))
-thumbnail_url: Optional[str] = Field(None, alias=str("thumbnailUrl"))
+thumbnail_url: Optional[str] = Field(default=None, alias=str("thumbnailUrl"))
class MessageReaction(BaseModel):
user_id: str = Field(..., alias=str("userId"))
@@ -438,22 +438,22 @@ class AccessibilitySettings(BaseModel):
high_contrast: bool = Field(..., alias=str("highContrast"))
reduce_motion: bool = Field(..., alias=str("reduceMotion"))
screen_reader: bool = Field(..., alias=str("screenReader"))
-color_blind_mode: Optional[ColorBlindMode] = Field(None, alias=str("colorBlindMode"))
+color_blind_mode: Optional[ColorBlindMode] = Field(default=None, alias=str("colorBlindMode"))
class ProcessingStep(BaseModel):
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
type: ProcessingStepType
parameters: Dict[str, Any]
order: int
-depends_on: Optional[List[str]] = Field(None, alias=str("dependsOn"))
+depends_on: Optional[List[str]] = Field(default=None, alias=str("dependsOn"))
class RetrievalParameters(BaseModel):
search_type: SearchType = Field(..., alias=str("searchType"))
top_k: int = Field(..., alias=str("topK"))
-similarity_threshold: Optional[float] = Field(None, alias=str("similarityThreshold"))
+similarity_threshold: Optional[float] = Field(default=None, alias=str("similarityThreshold"))
-reranker_model: Optional[str] = Field(None, alias=str("rerankerModel"))
+reranker_model: Optional[str] = Field(default=None, alias=str("rerankerModel"))
use_keyword_boost: bool = Field(..., alias=str("useKeywordBoost"))
-filter_options: Optional[Dict[str, Any]] = Field(None, alias=str("filterOptions"))
+filter_options: Optional[Dict[str, Any]] = Field(default=None, alias=str("filterOptions"))
context_window: int = Field(..., alias=str("contextWindow"))
class ErrorDetail(BaseModel):
@@ -482,8 +482,8 @@ class BaseUser(BaseUserWithType):
location: Optional[Location] = None
created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("createdAt"))
updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("updatedAt"))
-last_login: Optional[datetime] = Field(None, alias=str("lastLogin"))
+last_login: Optional[datetime] = Field(default=None, alias=str("lastLogin"))
-profile_image: Optional[str] = Field(None, alias=str("profileImage"))
+profile_image: Optional[str] = Field(default=None, alias=str("profileImage"))
status: UserStatus
is_admin: bool = Field(default=False, alias=str("isAdmin"))
@@ -500,8 +500,8 @@ class RagContentMetadata(BaseModel):
line_begin: int = Field(..., alias=str("lineBegin"))
line_end: int = Field(..., alias=str("lineEnd"))
lines: int
-chunk_begin: Optional[int] = Field(None, alias=str("chunkBegin"))
+chunk_begin: Optional[int] = Field(default=None, alias=str("chunkBegin"))
-chunk_end: Optional[int] = Field(None, alias=str("chunkEnd"))
+chunk_end: Optional[int] = Field(default=None, alias=str("chunkEnd"))
metadata: Dict[str, Any] = Field(default_factory=dict)
model_config = ConfigDict(populate_by_name=True)
@@ -566,13 +566,13 @@ class Candidate(BaseUser):
experience: Optional[List[WorkExperience]] = None
questions: Optional[List[CandidateQuestion]] = None
education: Optional[List[Education]] = None
-preferred_job_types: Optional[List[EmploymentType]] = Field(None, alias=str("preferredJobTypes"))
+preferred_job_types: Optional[List[EmploymentType]] = Field(default=None, alias=str("preferredJobTypes"))
-desired_salary: Optional[DesiredSalary] = Field(None, alias=str("desiredSalary"))
+desired_salary: Optional[DesiredSalary] = Field(default=None, alias=str("desiredSalary"))
-availability_date: Optional[datetime] = Field(None, alias=str("availabilityDate"))
+availability_date: Optional[datetime] = Field(default=None, alias=str("availabilityDate"))
summary: Optional[str] = None
languages: Optional[List[Language]] = None
certifications: Optional[List[Certification]] = None
-job_applications: Optional[List["JobApplication"]] = Field(None, alias=str("jobApplications"))
+job_applications: Optional[List["JobApplication"]] = Field(default=None, alias=str("jobApplications"))
rags: List[RagEntry] = Field(default_factory=list)
rag_content_size : int = 0
is_public: bool = Field(default=True, alias=str("isPublic"))
@@ -591,20 +591,20 @@ class Employer(BaseUser):
description: Optional[str] = None
company_size: str = Field(..., alias=str("companySize"))
company_description: str = Field(..., alias=str("companyDescription"))
-website_url: Optional[HttpUrl] = Field(None, alias=str("websiteUrl"))
+website_url: Optional[HttpUrl] = Field(default=None, alias=str("websiteUrl"))
jobs: Optional[List["Job"]] = None
-company_logo: Optional[str] = Field(None, alias=str("companyLogo"))
+company_logo: Optional[str] = Field(default=None, alias=str("companyLogo"))
-social_links: Optional[List[SocialLink]] = Field(None, alias=str("socialLinks"))
+social_links: Optional[List[SocialLink]] = Field(default=None, alias=str("socialLinks"))
poc: Optional[PointOfContact] = None
class Guest(BaseUser):
user_type: UserType = Field(UserType.GUEST, alias=str("userType"))
session_id: str = Field(..., alias=str("sessionId"))
username: str # Add username for consistency with other user types
-converted_to_user_id: Optional[str] = Field(None, alias=str("convertedToUserId"))
+converted_to_user_id: Optional[str] = Field(default=None, alias=str("convertedToUserId"))
-ip_address: Optional[str] = Field(None, alias=str("ipAddress"))
+ip_address: Optional[str] = Field(default=None, alias=str("ipAddress"))
created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("createdAt"))
-user_agent: Optional[str] = Field(None, alias=str("userAgent"))
+user_agent: Optional[str] = Field(default=None, alias=str("userAgent"))
rag_content_size: int = 0
is_public: bool = Field(default=False, alias=str("isPublic"))
model_config = ConfigDict(populate_by_name=True, use_enum_values=True)
@@ -614,14 +614,14 @@ class Authentication(BaseModel):
password_hash: str = Field(..., alias=str("passwordHash"))
salt: str
refresh_tokens: List[RefreshToken] = Field(..., alias=str("refreshTokens"))
-reset_password_token: Optional[str] = Field(None, alias=str("resetPasswordToken"))
+reset_password_token: Optional[str] = Field(default=None, alias=str("resetPasswordToken"))
-reset_password_expiry: Optional[datetime] = Field(None, alias=str("resetPasswordExpiry"))
+reset_password_expiry: Optional[datetime] = Field(default=None, alias=str("resetPasswordExpiry"))
last_password_change: datetime = Field(..., alias=str("lastPasswordChange"))
mfa_enabled: bool = Field(..., alias=str("mfaEnabled"))
-mfa_method: Optional[MFAMethod] = Field(None, alias=str("mfaMethod"))
+mfa_method: Optional[MFAMethod] = Field(default=None, alias=str("mfaMethod"))
-mfa_secret: Optional[str] = Field(None, alias=str("mfaSecret"))
+mfa_secret: Optional[str] = Field(default=None, alias=str("mfaSecret"))
login_attempts: int = Field(..., alias=str("loginAttempts"))
-locked_until: Optional[datetime] = Field(None, alias=str("lockedUntil"))
+locked_until: Optional[datetime] = Field(default=None, alias=str("lockedUntil"))
model_config = ConfigDict(populate_by_name=True)
class AuthResponse(BaseModel):
@@ -655,23 +655,23 @@ class JobRequirements(BaseModel):
experience: Optional[List[str]] = []
education: Optional[List[str]] = []
certifications: Optional[List[str]] = []
-preferred_attributes: Optional[List[str]] = Field(None, alias=str("preferredAttributes"))
+preferred_attributes: Optional[List[str]] = Field(default=None, alias=str("preferredAttributes"))
-company_values: Optional[List[str]] = Field(None, alias=str("companyValues"))
+company_values: Optional[List[str]] = Field(default=None, alias=str("companyValues"))
model_config = ConfigDict(populate_by_name=True)
class JobDetails(BaseModel):
location: Location
-salary_range: Optional[SalaryRange] = Field(None, alias=str("salaryRange"))
+salary_range: Optional[SalaryRange] = Field(default=None, alias=str("salaryRange"))
employment_type: EmploymentType = Field(..., alias=str("employmentType"))
date_posted: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("datePosted"))
-application_deadline: Optional[datetime] = Field(None, alias=str("applicationDeadline"))
+application_deadline: Optional[datetime] = Field(default=None, alias=str("applicationDeadline"))
is_active: bool = Field(..., alias=str("isActive"))
applicants: Optional[List["JobApplication"]] = None
department: Optional[str] = None
-reports_to: Optional[str] = Field(None, alias=str("reportsTo"))
+reports_to: Optional[str] = Field(default=None, alias=str("reportsTo"))
benefits: Optional[List[str]] = None
-visa_sponsorship: Optional[bool] = Field(None, alias=str("visaSponsorship"))
+visa_sponsorship: Optional[bool] = Field(default=None, alias=str("visaSponsorship"))
-featured_until: Optional[datetime] = Field(None, alias=str("featuredUntil"))
+featured_until: Optional[datetime] = Field(default=None, alias=str("featuredUntil"))
views: int = 0
application_count: int = Field(0, alias=str("applicationCount"))
@@ -704,7 +704,7 @@ class InterviewFeedback(BaseModel):
created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("createdAt"))
updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("updatedAt"))
is_visible: bool = Field(..., alias=str("isVisible"))
-skill_assessments: Optional[List[SkillAssessment]] = Field(None, alias=str("skillAssessments"))
+skill_assessments: Optional[List[SkillAssessment]] = Field(default=None, alias=str("skillAssessments"))
model_config = ConfigDict(populate_by_name=True)
class InterviewSchedule(BaseModel):
@@ -718,7 +718,7 @@ class InterviewSchedule(BaseModel):
notes: Optional[str] = None
feedback: Optional[InterviewFeedback] = None
status: Literal["scheduled", "completed", "cancelled", "rescheduled"]
-meeting_link: Optional[HttpUrl] = Field(None, alias=str("meetingLink"))
+meeting_link: Optional[HttpUrl] = Field(default=None, alias=str("meetingLink"))
model_config = ConfigDict(populate_by_name=True)
class JobApplication(BaseModel):
@@ -729,11 +729,11 @@ class JobApplication(BaseModel):
applied_date: datetime = Field(..., alias=str("appliedDate"))
updated_date: datetime = Field(..., alias=str("updatedDate"))
resume_version: str = Field(..., alias=str("resumeVersion"))
-cover_letter: Optional[str] = Field(None, alias=str("coverLetter"))
+cover_letter: Optional[str] = Field(default=None, alias=str("coverLetter"))
notes: Optional[str] = None
-interview_schedules: Optional[List[InterviewSchedule]] = Field(None, alias=str("interviewSchedules"))
+interview_schedules: Optional[List[InterviewSchedule]] = Field(default=None, alias=str("interviewSchedules"))
-custom_questions: Optional[List[CustomQuestion]] = Field(None, alias=str("customQuestions"))
+custom_questions: Optional[List[CustomQuestion]] = Field(default=None, alias=str("customQuestions"))
-candidate_contact: Optional[CandidateContact] = Field(None, alias=str("candidateContact"))
+candidate_contact: Optional[CandidateContact] = Field(default=None, alias=str("candidateContact"))
decision: Optional[ApplicationDecision] = None
model_config = ConfigDict(populate_by_name=True)
@@ -749,8 +749,8 @@ class GuestSessionResponse(BaseModel):
class ChatContext(BaseModel):
type: ChatContextType
-related_entity_id: Optional[str] = Field(None, alias=str("relatedEntityId"))
+related_entity_id: Optional[str] = Field(default=None, alias=str("relatedEntityId"))
-related_entity_type: Optional[Literal["job", "candidate", "employer"]] = Field(None, alias=str("relatedEntityType"))
+related_entity_type: Optional[Literal["job", "candidate", "employer"]] = Field(default=None, alias=str("relatedEntityType"))
additional_context: Optional[Dict[str, Any]] = Field({}, alias=str("additionalContext"))
model_config = ConfigDict(populate_by_name=True)
@@ -774,7 +774,7 @@ class RateLimitResult(BaseModel):
"""Result of rate limit check"""
allowed: bool
reason: Optional[str] = None
-retry_after_seconds: Optional[int] = Field(None, alias=str("retryAfterSeconds"))
+retry_after_seconds: Optional[int] = Field(default=None, alias=str("retryAfterSeconds"))
remaining_requests: Dict[str, int] = Field(default_factory=dict, alias=str("remainingRequests"))
reset_times: Dict[str, datetime] = Field(default_factory=dict, alias=str("resetTimes"))
model_config = ConfigDict(populate_by_name=True)
@@ -804,11 +804,11 @@ class GuestConversionRequest(BaseModel):
phone: Optional[str] = None
# Employer-specific fields (optional)
-company_name: Optional[str] = Field(None, alias=str("companyName"))
+company_name: Optional[str] = Field(default=None, alias=str("companyName"))
industry: Optional[str] = None
-company_size: Optional[str] = Field(None, alias=str("companySize"))
+company_size: Optional[str] = Field(default=None, alias=str("companySize"))
-company_description: Optional[str] = Field(None, alias=str("companyDescription"))
+company_description: Optional[str] = Field(default=None, alias=str("companyDescription"))
-website_url: Optional[HttpUrl] = Field(None, alias=str("websiteUrl"))
+website_url: Optional[HttpUrl] = Field(default=None, alias=str("websiteUrl"))
model_config = ConfigDict(populate_by_name=True)
@field_validator('username')
@@ -927,7 +927,7 @@ class ChatMessage(ChatMessageUser):
#attachments: Optional[List[Attachment]] = None
#reactions: Optional[List[MessageReaction]] = None
#is_edited: bool = Field(False, alias=str("isEdited"))
-#edit_history: Optional[List[EditHistory]] = Field(None, alias=str("editHistory"))
+#edit_history: Optional[List[EditHistory]] = Field(default=None, alias=str("editHistory"))
class ChatMessageSkillAssessment(ChatMessageUser):
role: ChatSenderType = ChatSenderType.ASSISTANT
@@ -938,8 +938,8 @@ class ChatMessageResume(ChatMessageUser):
role: ChatSenderType = ChatSenderType.ASSISTANT
metadata: ChatMessageMetaData = Field(default=ChatMessageMetaData())
resume: str = Field(..., alias=str("resume"))
-system_prompt: Optional[str] = Field(None, alias=str("systemPrompt"))
+system_prompt: Optional[str] = Field(default=None, alias=str("systemPrompt"))
-prompt: Optional[str] = Field(None, alias=str("prompt"))
+prompt: Optional[str] = Field(default=None, alias=str("prompt"))
model_config = ConfigDict(populate_by_name=True)
class Resume(BaseModel):
@@ -974,15 +974,15 @@ class SystemInfo(BaseModel):
class ChatSession(BaseModel):
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
-user_id: Optional[str] = Field(None, alias=str("userId"))
+user_id: Optional[str] = Field(default=None, alias=str("userId"))
-guest_id: Optional[str] = Field(None, alias=str("guestId"))
+guest_id: Optional[str] = Field(default=None, alias=str("guestId"))
created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("createdAt"))
last_activity: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("lastActivity"))
title: Optional[str] = None
context: ChatContext
# messages: Optional[List[ChatMessage]] = None
is_archived: bool = Field(False, alias=str("isArchived"))
-system_prompt: Optional[str] = Field(None, alias=str("systemPrompt"))
+system_prompt: Optional[str] = Field(default=None, alias=str("systemPrompt"))
model_config = ConfigDict(populate_by_name=True)
@model_validator(mode="after")
@@ -998,10 +998,10 @@ class DataSourceConfiguration(BaseModel):
source_type: DataSourceType = Field(..., alias=str("sourceType"))
connection_details: Dict[str, Any] = Field(..., alias=str("connectionDetails"))
processing_pipeline: List[ProcessingStep] = Field(..., alias=str("processingPipeline"))
-refresh_schedule: Optional[str] = Field(None, alias=str("refreshSchedule"))
+refresh_schedule: Optional[str] = Field(default=None, alias=str("refreshSchedule"))
-last_refreshed: Optional[datetime] = Field(None, alias=str("lastRefreshed"))
+last_refreshed: Optional[datetime] = Field(default=None, alias=str("lastRefreshed"))
status: Literal["active", "pending", "error", "processing"]
-error_details: Optional[str] = Field(None, alias=str("errorDetails"))
+error_details: Optional[str] = Field(default=None, alias=str("errorDetails"))
metadata: Optional[Dict[str, Any]] = None
model_config = ConfigDict(populate_by_name=True)
@@ -1022,14 +1022,14 @@ class RAGConfiguration(BaseModel):
class UserActivity(BaseModel):
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
-user_id: Optional[str] = Field(None, alias=str("userId"))
+user_id: Optional[str] = Field(default=None, alias=str("userId"))
-guest_id: Optional[str] = Field(None, alias=str("guestId"))
+guest_id: Optional[str] = Field(default=None, alias=str("guestId"))
activity_type: ActivityType = Field(..., alias=str("activityType"))
timestamp: datetime
metadata: Dict[str, Any]
-ip_address: Optional[str] = Field(None, alias=str("ipAddress"))
+ip_address: Optional[str] = Field(default=None, alias=str("ipAddress"))
-user_agent: Optional[str] = Field(None, alias=str("userAgent"))
+user_agent: Optional[str] = Field(default=None, alias=str("userAgent"))
-session_id: Optional[str] = Field(None, alias=str("sessionId"))
+session_id: Optional[str] = Field(default=None, alias=str("sessionId"))
model_config = ConfigDict(populate_by_name=True)
@model_validator(mode="after")
@@ -1054,7 +1054,7 @@ class UserPreference(BaseModel):
theme: ThemePreference
notifications: List[NotificationPreference]
accessibility: AccessibilitySettings
-dashboard_layout: Optional[Dict[str, Any]] = Field(None, alias=str("dashboardLayout"))
+dashboard_layout: Optional[Dict[str, Any]] = Field(default=None, alias=str("dashboardLayout"))
language: str
timezone: str
email_frequency: Literal["immediate", "daily", "weekly", "never"] = Field(..., alias=str("emailFrequency"))
@@ -1096,7 +1096,7 @@ class CreateEmployerRequest(BaseModel):
company_size: str = Field(..., alias=str("companySize"))
company_description: str = Field(..., alias=str("companyDescription"))
# Add other required employer fields
-website_url: Optional[str] = Field(None, alias=str("websiteUrl"))
+website_url: Optional[str] = Field(default=None, alias=str("websiteUrl"))
phone: Optional[str] = None
model_config = ConfigDict(populate_by_name=True)
@@ -1116,14 +1116,14 @@ class CreateEmployerRequest(BaseModel):
class ChatQuery(BaseModel):
prompt: str
tunables: Optional[Tunables] = None
-agent_options: Optional[Dict[str, Any]] = Field(None, alias=str("agentOptions"))
+agent_options: Optional[Dict[str, Any]] = Field(default=None, alias=str("agentOptions"))
model_config = ConfigDict(populate_by_name=True)
class PaginatedRequest(BaseModel):
page: Annotated[int, Field(ge=1)] = 1
limit: Annotated[int, Field(ge=1, le=100)] = 20
-sort_by: Optional[str] = Field(None, alias=str("sortBy"))
+sort_by: Optional[str] = Field(default=None, alias=str("sortBy"))
-sort_order: Optional[SortOrder] = Field(None, alias=str("sortOrder"))
+sort_order: Optional[SortOrder] = Field(default=None, alias=str("sortOrder"))
filters: Optional[Dict[str, Any]] = None
model_config = ConfigDict(populate_by_name=True)
@@ -1132,8 +1132,8 @@ class SearchQuery(BaseModel):
filters: Optional[Dict[str, Any]] = None
page: Annotated[int, Field(ge=1)] = 1
limit: Annotated[int, Field(ge=1, le=100)] = 20
-sort_by: Optional[str] = Field(None, alias=str("sortBy"))
+sort_by: Optional[str] = Field(default=None, alias=str("sortBy"))
-sort_order: Optional[SortOrder] = Field(None, alias=str("sortOrder"))
+sort_order: Optional[SortOrder] = Field(default=None, alias=str("sortOrder"))
model_config = ConfigDict(populate_by_name=True)
class PaginatedResponse(BaseModel):

View File

@@ -1,5 +1,5 @@
from __future__ import annotations
-from pydantic import BaseModel, field_serializer, field_validator, model_validator, Field # type: ignore
+from pydantic import BaseModel # type: ignore
from typing import List, Optional, Dict, Any
import os
import glob

View File

@@ -136,7 +136,7 @@ async def get_system_health(
if database_manager:
try:
-database = database_manager.get_database()
+database_manager.get_database()
from database.manager import redis_manager
redis_health = await redis_manager.health_check()
db_health = {

View File

@@ -676,7 +676,7 @@ async def request_mfa(
# Check if device is trusted
device_manager = DeviceManager(database)
-device_info = device_manager.parse_device_info(http_request)
+device_manager.parse_device_info(http_request)
is_trusted = await device_manager.is_trusted_device(user_data["id"], request.device_id)

View File

@ -59,7 +59,7 @@ async def create_candidate_ai(
prometheus_collector=prometheus_collector) prometheus_collector=prometheus_collector)
if not generate_agent: if not generate_agent:
logger.warning(f"⚠️ Unable to create AI generation agent.") logger.warning("⚠️ Unable to create AI generation agent.")
return JSONResponse( return JSONResponse(
status_code=400, status_code=400,
content=create_error_response("AGENT_NOT_FOUND", "Unable to create AI generation agent") content=create_error_response("AGENT_NOT_FOUND", "Unable to create AI generation agent")
@ -91,7 +91,7 @@ async def create_candidate_ai(
resume_message = generated_message resume_message = generated_message
if not persona_message: if not persona_message:
logger.error(f"❌ AI generation failed: No message generated") logger.error("❌ AI generation failed: No message generated")
return JSONResponse( return JSONResponse(
status_code=500, status_code=500,
content=create_error_response("AI_GENERATION_FAILED", "Failed to generate AI candidate data") content=create_error_response("AI_GENERATION_FAILED", "Failed to generate AI candidate data")
@ -124,7 +124,7 @@ async def create_candidate_ai(
}) })
candidate = CandidateAI.model_validate(candidate_data) candidate = CandidateAI.model_validate(candidate_data)
except ValidationError as e: except ValidationError as e:
logger.error(f"❌ AI candidate data validation failed") logger.error("❌ AI candidate data validation failed")
for lines in backstory_traceback.format_exc().splitlines(): for lines in backstory_traceback.format_exc().splitlines():
logger.error(lines) logger.error(lines)
logger.error(json.dumps(persona_message.content, indent=2)) logger.error(json.dumps(persona_message.content, indent=2))
@ -134,7 +134,7 @@ async def create_candidate_ai(
status_code=400, status_code=400,
content=create_error_response("AI_VALIDATION_FAILED", "AI-generated data validation failed") content=create_error_response("AI_VALIDATION_FAILED", "AI-generated data validation failed")
) )
except Exception as e: except Exception:
# Log the error and return a validation error response # Log the error and return a validation error response
for lines in backstory_traceback.format_exc().splitlines(): for lines in backstory_traceback.format_exc().splitlines():
logger.error(lines) logger.error(lines)
@ -165,7 +165,7 @@ async def create_candidate_ai(
document_id = str(uuid.uuid4()) document_id = str(uuid.uuid4())
document_type = DocumentType.MARKDOWN document_type = DocumentType.MARKDOWN
document_content = resume_message.content.encode('utf-8') document_content = resume_message.content.encode('utf-8')
document_filename = f"resume.md" document_filename = "resume.md"
document_data = Document( document_data = Document(
id=document_id, id=document_id,
@ -313,7 +313,7 @@ async def upload_candidate_document(
# Parse the JSON string and create DocumentOptions object # Parse the JSON string and create DocumentOptions object
options_dict = json.loads(options_data) options_dict = json.loads(options_data)
options : DocumentOptions = DocumentOptions.model_validate(options_dict) options : DocumentOptions = DocumentOptions.model_validate(options_dict)
except (json.JSONDecodeError, ValidationError) as e: except (json.JSONDecodeError, ValidationError):
return StreamingResponse( return StreamingResponse(
iter([json.dumps(ChatMessageError( iter([json.dumps(ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads session_id=MOCK_UUID, # No session ID for document uploads
@ -733,7 +733,7 @@ async def get_document_content(
file_path = os.path.join(defines.user_dir, candidate.username, "rag-content" if document.options.include_in_rag else "files", document.original_name) file_path = os.path.join(defines.user_dir, candidate.username, "rag-content" if document.options.include_in_rag else "files", document.original_name)
file_path = pathlib.Path(file_path) file_path = pathlib.Path(file_path)
if not document.type in [DocumentType.TXT, DocumentType.MARKDOWN]: if document.type not in [DocumentType.TXT, DocumentType.MARKDOWN]:
file_path = file_path.with_suffix('.md') file_path = file_path.with_suffix('.md')
if not file_path.exists(): if not file_path.exists():
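One change of a different kind above: the membership test flips from "not document.type in [...]" to "document.type not in [...]" (pycodestyle E713, assumed). Both forms are equivalent; the second reads as a single operator. For illustration:

    allowed = ("txt", "md")
    ext = "pdf"
    if not ext in allowed:       # flagged form (E713, assumed)
        print("unsupported")
    if ext not in allowed:       # preferred, same result
        print("unsupported")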
@ -754,7 +754,7 @@ async def get_document_content(
content=content, content=content,
size=document.size size=document.size
) )
return create_success_response(response.model_dump(by_alias=True)); return create_success_response(response.model_dump(by_alias=True))
except Exception as e: except Exception as e:
logger.error(f"❌ Failed to read document file: {e}") logger.error(f"❌ Failed to read document file: {e}")
@ -820,7 +820,7 @@ async def update_document(
dst = pathlib.Path(rag_path) dst = pathlib.Path(rag_path)
# Move to RAG directory # Move to RAG directory
src.rename(dst) src.rename(dst)
logger.info(f"📁 Moved file to RAG directory") logger.info("📁 Moved file to RAG directory")
if document.type != DocumentType.MARKDOWN and document.type != DocumentType.TXT: if document.type != DocumentType.MARKDOWN and document.type != DocumentType.TXT:
src = pathlib.Path(file_path) src = pathlib.Path(file_path)
src_as_md = src.with_suffix(".md") src_as_md = src.with_suffix(".md")
@ -832,7 +832,7 @@ async def update_document(
dst = pathlib.Path(file_path) dst = pathlib.Path(file_path)
# Move to regular files directory # Move to regular files directory
src.rename(dst) src.rename(dst)
logger.info(f"📁 Moved file to regular files directory") logger.info("📁 Moved file to regular files directory")
if document.type != DocumentType.MARKDOWN and document.type != DocumentType.TXT: if document.type != DocumentType.MARKDOWN and document.type != DocumentType.TXT:
src_as_md = src.with_suffix(".md") src_as_md = src.with_suffix(".md")
if src_as_md.exists(): if src_as_md.exists():
@ -1347,7 +1347,7 @@ async def get_candidate(
all_candidates_data = await database.get_all_candidates() all_candidates_data = await database.get_all_candidates()
if not all_candidates_data: if not all_candidates_data:
logger.warning(f"⚠️ No candidates found in database") logger.warning("⚠️ No candidates found in database")
return JSONResponse( return JSONResponse(
status_code=404, status_code=404,
content=create_error_response("NOT_FOUND", "No candidates found") content=create_error_response("NOT_FOUND", "No candidates found")
@ -1474,7 +1474,7 @@ async def get_candidate_skill_match(
if not agent: if not agent:
error_message = ChatMessageError( error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads session_id=MOCK_UUID, # No session ID for document uploads
content=f"No skill match agent found for this candidate" content="No skill match agent found for this candidate"
) )
yield error_message yield error_message
return return
@ -1517,7 +1517,7 @@ async def get_candidate_skill_match(
if final_message is None: if final_message is None:
error_message = ChatMessageError( error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads session_id=MOCK_UUID, # No session ID for document uploads
content=f"No match found for the given skill" content="No match found for the given skill"
) )
yield error_message yield error_message
return return
@ -1525,7 +1525,7 @@ async def get_candidate_skill_match(
if not isinstance(final_message, ChatMessageSkillAssessment): if not isinstance(final_message, ChatMessageSkillAssessment):
error_message = ChatMessageError( error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads session_id=MOCK_UUID, # No session ID for document uploads
content=f"Skill match response is not valid" content="Skill match response is not valid"
) )
yield error_message yield error_message
return return
@ -1535,7 +1535,7 @@ async def get_candidate_skill_match(
if not assessment: if not assessment:
error_message = ChatMessageError( error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads session_id=MOCK_UUID, # No session ID for document uploads
content=f"Skill assessment could not be generated" content="Skill assessment could not be generated"
) )
yield error_message yield error_message
return return
@ -1772,7 +1772,7 @@ async def generate_resume(
logger.error("❌ Uninitialized skill match data, cannot generate resume") logger.error("❌ Uninitialized skill match data, cannot generate resume")
error_message = ChatMessageError( error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads session_id=MOCK_UUID, # No session ID for document uploads
content=f"Uninitialized skill match data, cannot generate resume" content="Uninitialized skill match data, cannot generate resume"
) )
yield error_message yield error_message
return return
@ -1784,7 +1784,7 @@ async def generate_resume(
if not agent: if not agent:
error_message = ChatMessageError( error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads session_id=MOCK_UUID, # No session ID for document uploads
content=f"No skill match agent found for this candidate" content="No skill match agent found for this candidate"
) )
yield error_message yield error_message
return return
@ -1793,7 +1793,7 @@ async def generate_resume(
if not isinstance(agent, GenerateResume): if not isinstance(agent, GenerateResume):
error_message = ChatMessageError( error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads session_id=MOCK_UUID, # No session ID for document uploads
content=f"Agent is not a GenerateResume instance" content="Agent is not a GenerateResume instance"
) )
yield error_message yield error_message
return return
@ -1829,7 +1829,7 @@ async def generate_resume(
if final_message is None: if final_message is None:
error_message = ChatMessageError( error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads session_id=MOCK_UUID, # No session ID for document uploads
content=f"No skill match found for the given requirement" content="No skill match found for the given requirement"
) )
yield error_message yield error_message
return return
@ -1837,7 +1837,7 @@ async def generate_resume(
if not isinstance(final_message, ChatMessageResume): if not isinstance(final_message, ChatMessageResume):
error_message = ChatMessageError( error_message = ChatMessageError(
session_id=MOCK_UUID, # No session ID for document uploads session_id=MOCK_UUID, # No session ID for document uploads
content=f"Skill match response is not valid" content="Skill match response is not valid"
) )
yield error_message yield error_message
return return

View File

@ -29,36 +29,7 @@ import backstory_traceback
import entities.entity_manager as entities import entities.entity_manager as entities
from models import ( from models import (
LoginRequest, CreateCandidateRequest, CreateEmployerRequest, Candidate, ChatMessageUser, Candidate, BaseUserWithType, ChatSession, ChatMessage
Candidate, Employer, Guest, AuthResponse,
MFARequest, MFAData, MFARequestResponse, MFAVerifyRequest,
EmailVerificationRequest, ResendVerificationRequest,
# API
MOCK_UUID, ApiActivityType, ChatMessageError, ChatMessageResume,
ChatMessageSkillAssessment, ChatMessageStatus, ChatMessageStreaming,
ChatMessageUser, DocumentMessage, DocumentOptions, Job,
JobRequirements, JobRequirementsMessage, LoginRequest,
CreateCandidateRequest, CreateEmployerRequest,
# User models
Candidate, Employer, BaseUserWithType, BaseUser, Guest,
Authentication, AuthResponse, CandidateAI,
# Job models
JobApplication, ApplicationStatus,
# Chat models
ChatSession, ChatMessage, ChatContext, ChatQuery, ApiStatusType, ChatSenderType, ApiMessageType, ChatContextType,
ChatMessageRagSearch,
# Document models
Document, DocumentType, DocumentListResponse, DocumentUpdateRequest, DocumentContentResponse,
# Supporting models
Location, MFARequest, MFAData, MFARequestResponse, MFAVerifyRequest, RagContentMetadata, RagContentResponse, ResendVerificationRequest, Resume, ResumeMessage, Skill, SkillAssessment, SystemInfo, UserType, WorkExperience, Education,
# Email
EmailVerificationRequest
) )
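The collapsed "from models import (...)" block above keeps only the names this module actually references; each removed name would otherwise be reported as imported-but-unused (pyflakes F401, assumed). The same pattern with standard-library names, purely for illustration:

    # Before: most of these names are never used below, so each unused one is flagged.
    # from os.path import join, exists, isdir, basename, dirname
    from os.path import join, exists   # After: keep only what the module uses

    print(join("a", "b"), exists("a"))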
@ -269,9 +240,9 @@ async def post_chat_session_message_stream(
chat_session_data=chat_session_data, chat_session_data=chat_session_data,
) )
except Exception as e: except Exception:
logger.error(backstory_traceback.format_exc()) logger.error(backstory_traceback.format_exc())
logger.error(f"❌ Chat message streaming error") logger.error("❌ Chat message streaming error")
return JSONResponse( return JSONResponse(
status_code=500, status_code=500,
content=create_error_response("STREAMING_ERROR", "") content=create_error_response("STREAMING_ERROR", "")

View File

@ -37,7 +37,7 @@ async def debug_guest_session(
user_lookup = await database.get_user_by_id(guest_id) user_lookup = await database.get_user_by_id(guest_id)
# Get TTL info # Get TTL info
primary_ttl = await database.redis.ttl(f"guests") primary_ttl = await database.redis.ttl("guests")
backup_ttl = await database.redis.ttl(f"guest_backup:{guest_id}") backup_ttl = await database.redis.ttl(f"guest_backup:{guest_id}")
debug_info = { debug_info = {

View File

@ -44,7 +44,7 @@ async def reformat_as_markdown(database: RedisDatabase, candidate_entity: Candid
return return
status_message = ChatMessageStatus( status_message = ChatMessageStatus(
session_id=MOCK_UUID, # No session ID for document uploads session_id=MOCK_UUID, # No session ID for document uploads
content=f"Reformatting job description as markdown...", content="Reformatting job description as markdown...",
activity=ApiActivityType.CONVERTING activity=ApiActivityType.CONVERTING
) )
yield status_message yield status_message
@ -73,9 +73,9 @@ Return only the markdown content, no other text. Make sure all content is includ
chat_message : ChatMessage = message chat_message : ChatMessage = message
try: try:
chat_message.content = chat_agent.extract_markdown_from_text(chat_message.content) chat_message.content = chat_agent.extract_markdown_from_text(chat_message.content)
except Exception as e: except Exception:
pass pass
logger.info(f"✅ Successfully converted content to markdown") logger.info("✅ Successfully converted content to markdown")
yield chat_message yield chat_message
return return
@ -114,7 +114,7 @@ async def create_job_from_content(database: RedisDatabase, current_user: Candida
return return
status_message = ChatMessageStatus( status_message = ChatMessageStatus(
session_id=MOCK_UUID, # No session ID for document uploads session_id=MOCK_UUID, # No session ID for document uploads
content=f"Analyzing document for company and requirement details...", content="Analyzing document for company and requirement details...",
activity=ApiActivityType.SEARCHING activity=ApiActivityType.SEARCHING
) )
yield status_message yield status_message
@ -178,7 +178,7 @@ async def create_candidate_job(
database: RedisDatabase = Depends(get_database) database: RedisDatabase = Depends(get_database)
): ):
"""Create a new job""" """Create a new job"""
is_employer = isinstance(current_user, Employer) isinstance(current_user, Employer)
try: try:
job = Job.model_validate(job_data) job = Job.model_validate(job_data)
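In the hunk above, is_employer is assigned but never read, so the binding is dropped (F841, assumed). Note that the change keeps the right-hand side, leaving a bare isinstance(current_user, Employer) expression with no effect; the same residue appears later as a bare inspect.signature(func) call. If the value truly is not needed, removing the whole statement is the cleaner end state. A sketch using an illustrative stand-in class:

    class Employer:                  # stand-in type, for illustration only
        pass

    current_user = Employer()

    is_employer = isinstance(current_user, Employer)   # flagged: assigned, never read (F841, assumed)

    # Dropping only the name leaves a statement with no effect:
    isinstance(current_user, Employer)

    # Deleting the statement entirely is usually the better end state when the check is unneeded.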

View File

@ -31,7 +31,7 @@ async def get_user(
all_candidate_data = await database.get_all_candidates() all_candidate_data = await database.get_all_candidates()
if not all_candidate_data: if not all_candidate_data:
logger.warning(f"⚠️ No users found in database") logger.warning("⚠️ No users found in database")
return JSONResponse( return JSONResponse(
status_code=404, status_code=404,
content=create_error_response("NOT_FOUND", "No users found") content=create_error_response("NOT_FOUND", "No users found")
@ -48,7 +48,7 @@ async def get_user(
if not user_data: if not user_data:
all_guest_data = await database.get_all_guests() all_guest_data = await database.get_all_guests()
if not all_guest_data: if not all_guest_data:
logger.warning(f"⚠️ No guests found in database") logger.warning("⚠️ No guests found in database")
return JSONResponse( return JSONResponse(
status_code=404, status_code=404,
content=create_error_response("NOT_FOUND", "No users found") content=create_error_response("NOT_FOUND", "No users found")

View File

@ -181,7 +181,7 @@ def get_forecast(grid_endpoint):
def TickerValue(ticker_symbols): def TickerValue(ticker_symbols):
api_key = os.getenv("TWELVEDATA_API_KEY", "") api_key = os.getenv("TWELVEDATA_API_KEY", "")
if not api_key: if not api_key:
return {"error": f"Error fetching data: No API key for TwelveData"} return {"error": "Error fetching data: No API key for TwelveData"}
results = [] results = []
for ticker_symbol in ticker_symbols.split(","): for ticker_symbol in ticker_symbols.split(","):
@ -512,13 +512,13 @@ class ToolEntry(BaseModel):
tool: Tool tool: Tool
def llm_tools(tools: List[ToolEntry]) -> List[Dict[str, Any]]: def llm_tools(tools: List[ToolEntry]) -> List[Dict[str, Any]]:
return [entry.tool.model_dump(mode='json') for entry in tools if entry.enabled == True] return [entry.tool.model_dump(mode='json') for entry in tools if entry.enabled is True]
def all_tools() -> List[ToolEntry]: def all_tools() -> List[ToolEntry]:
return [ToolEntry(tool=tool) for tool in tools] return [ToolEntry(tool=tool) for tool in tools]
def enabled_tools(tools: List[ToolEntry]) -> List[ToolEntry]: def enabled_tools(tools: List[ToolEntry]) -> List[ToolEntry]:
return [ToolEntry(tool=entry.tool) for entry in tools if entry.enabled == True] return [ToolEntry(tool=entry.tool) for entry in tools if entry.enabled is True]
tool_functions = ["DateTime", "WeatherForecast", "TickerValue", "AnalyzeSite", "GenerateImage"] tool_functions = ["DateTime", "WeatherForecast", "TickerValue", "AnalyzeSite", "GenerateImage"]
__all__ = ["ToolEntry", "all_tools", "llm_tools", "enabled_tools", "tool_functions"] __all__ = ["ToolEntry", "all_tools", "llm_tools", "enabled_tools", "tool_functions"]

View File

@ -14,37 +14,8 @@ import defines
from logger import logger from logger import logger
from models import DocumentType from models import DocumentType
from models import ( from models import (
LoginRequest, CreateCandidateRequest, CreateEmployerRequest, Job,
Candidate, Employer, Guest, AuthResponse, ChatMessage, DocumentType, ApiStatusType
MFARequest, MFAData, MFARequestResponse, MFAVerifyRequest,
EmailVerificationRequest, ResendVerificationRequest,
# API
MOCK_UUID, ApiActivityType, ChatMessageError, ChatMessageResume,
ChatMessageSkillAssessment, ChatMessageStatus, ChatMessageStreaming,
ChatMessageUser, DocumentMessage, DocumentOptions, Job,
JobRequirements, JobRequirementsMessage, LoginRequest,
CreateCandidateRequest, CreateEmployerRequest,
# User models
Candidate, Employer, BaseUserWithType, BaseUser, Guest,
Authentication, AuthResponse, CandidateAI,
# Job models
JobApplication, ApplicationStatus,
# Chat models
ChatSession, ChatMessage, ChatContext, ChatQuery, ChatSenderType, ApiMessageType, ChatContextType,
ChatMessageRagSearch,
# Document models
Document, DocumentType, DocumentListResponse, DocumentUpdateRequest, DocumentContentResponse,
# Supporting models
Location, MFARequest, MFAData, MFARequestResponse, MFAVerifyRequest, RagContentMetadata, RagContentResponse, ResendVerificationRequest, Resume, ResumeMessage, Skill, SkillAssessment, SystemInfo, UserType, WorkExperience, Education,
# Email
EmailVerificationRequest,
ApiStatusType
) )
from typing import List, Dict from typing import List, Dict
@ -103,7 +74,6 @@ async def stream_agent_response(chat_agent, user_message, chat_session_data=None
"""Stream agent response with proper formatting""" """Stream agent response with proper formatting"""
async def message_stream_generator(): async def message_stream_generator():
"""Generator to stream messages with persistence""" """Generator to stream messages with persistence"""
last_log = None
final_message = None final_message = None
import utils.llm_proxy as llm_manager import utils.llm_proxy as llm_manager
@ -215,7 +185,7 @@ async def reformat_as_markdown(database, candidate_entity, content: str):
status_message = ChatMessageStatus( status_message = ChatMessageStatus(
session_id=MOCK_UUID, session_id=MOCK_UUID,
content=f"Reformatting job description as markdown...", content="Reformatting job description as markdown...",
activity=ApiActivityType.CONVERTING activity=ApiActivityType.CONVERTING
) )
yield status_message yield status_message
@ -245,10 +215,10 @@ Return only the markdown content, no other text. Make sure all content is includ
chat_message: ChatMessage = message chat_message: ChatMessage = message
try: try:
chat_message.content = chat_agent.extract_markdown_from_text(chat_message.content) chat_message.content = chat_agent.extract_markdown_from_text(chat_message.content)
except Exception as e: except Exception:
pass pass
logger.info(f"✅ Successfully converted content to markdown") logger.info("✅ Successfully converted content to markdown")
yield chat_message yield chat_message
return return
@ -298,7 +268,7 @@ async def create_job_from_content(database, current_user, content: str):
status_message = ChatMessageStatus( status_message = ChatMessageStatus(
session_id=MOCK_UUID, session_id=MOCK_UUID,
content=f"Analyzing document for company and requirement details...", content="Analyzing document for company and requirement details...",
activity=ApiActivityType.SEARCHING activity=ApiActivityType.SEARCHING
) )
yield status_message yield status_message

View File

@ -1066,7 +1066,7 @@ async def example_usage():
print(f"Model: {response.model}") print(f"Model: {response.model}")
if response.usage: if response.usage:
print(f"Usage Statistics:") print("Usage Statistics:")
print(f" Prompt tokens: {response.usage.prompt_tokens}") print(f" Prompt tokens: {response.usage.prompt_tokens}")
print(f" Completion tokens: {response.usage.completion_tokens}") print(f" Completion tokens: {response.usage.completion_tokens}")
print(f" Total tokens: {response.usage.total_tokens}") print(f" Total tokens: {response.usage.total_tokens}")

View File

@ -325,7 +325,7 @@ def rate_limited(
async def wrapper(*args, **kwargs): async def wrapper(*args, **kwargs):
# Extract dependencies from function signature # Extract dependencies from function signature
import inspect import inspect
sig = inspect.signature(func) inspect.signature(func)
# Get request, current_user, and rate_limiter from kwargs or args # Get request, current_user, and rate_limiter from kwargs or args
request = None request = None