From f1c2e1638968436044d031c670f022684835560b Mon Sep 17 00:00:00 2001
From: James Ketrenos
Date: Wed, 18 Jun 2025 13:30:54 -0700
Subject: [PATCH] Reformatting with ruff

---
 src/backend/agents/base.py                   |  15 +-
 src/backend/agents/candidate_chat.py         |   2 +-
 src/backend/agents/generate_image.py         |   3 -
 src/backend/agents/generate_persona.py       |   6 +-
 src/backend/agents/generate_resume.py        |  10 +-
 src/backend/agents/job_requirements.py       |   7 +-
 src/backend/agents/skill_match.py            |   6 +-
 src/backend/database/manager.py              |   4 -
 src/backend/database/mixins/auth.py          |   4 +-
 src/backend/device_manager.py                |   1 -
 src/backend/entities/entity_manager.py       |   2 +-
 src/backend/focused_test.py                  |  58 +++----
 src/backend/image_generator/profile_image.py |   8 +-
 src/backend/models.py                        | 154 +++++++++----------
 src/backend/rag/rag.py                       |   2 +-
 src/backend/routes/admin.py                  |   2 +-
 src/backend/routes/auth.py                   |   2 +-
 src/backend/routes/candidates.py             |  40 ++---
 src/backend/routes/chat.py                   |  35 +----
 src/backend/routes/debug.py                  |   2 +-
 src/backend/routes/jobs.py                   |  10 +-
 src/backend/routes/users.py                  |   4 +-
 src/backend/tools/basetools.py               |   6 +-
 src/backend/utils/helpers.py                 |  42 +----
 src/backend/utils/llm_proxy.py               |   2 +-
 src/backend/utils/rate_limiter.py            |   2 +-
 26 files changed, 174 insertions(+), 255 deletions(-)

diff --git a/src/backend/agents/base.py b/src/backend/agents/base.py
index 21119f4..03ec451 100644
--- a/src/backend/agents/base.py
+++ b/src/backend/agents/base.py
@@ -1,5 +1,5 @@
 from __future__ import annotations
-from pydantic import BaseModel, Field, model_validator # type: ignore
+from pydantic import BaseModel, Field # type: ignore
 from typing import (
     Literal,
     get_args,
@@ -13,10 +13,10 @@ import time
 import re
 from abc import ABC
 from datetime import datetime, UTC
-from prometheus_client import Counter, Summary, CollectorRegistry # type: ignore
+from prometheus_client import CollectorRegistry # type: ignore
 import numpy as np # type: ignore
 import json_extractor as json_extractor
-from pydantic import BaseModel, Field, model_validator # type: ignore
+from pydantic import BaseModel, Field # type: ignore
 from uuid import uuid4
 from typing import List, Optional, ClassVar, Any, Literal
@@ -24,7 +24,7 @@ from datetime import datetime, UTC
 import numpy as np # type: ignore
 from uuid import uuid4
-from prometheus_client import CollectorRegistry, Counter # type: ignore
+from prometheus_client import CollectorRegistry # type: ignore
 import os
 import re
 from pathlib import Path
@@ -509,7 +509,6 @@ Content: {content}
             return

         results : List[ChromaDBGetResponse] = []
-        entries: int = 0
         user: CandidateEntity = self.user
         for rag in user.rags:
             if not rag.enabled:
@@ -594,7 +593,7 @@ Content: {content}
         status_message = ChatMessageStatus(
             session_id=session_id,
             activity=ApiActivityType.GENERATING,
-            content=f"Generating response..."
+            content="Generating response..."
         )
         yield status_message
@@ -675,7 +674,6 @@ Content: {content}
             session_id=session_id,
             content=prompt,
         )
-        user = self.user

         self.user.metrics.generate_count.labels(agent=self.agent_type).inc()
         with self.user.metrics.generate_duration.labels(agent=self.agent_type).time():
@@ -723,7 +721,6 @@ Content: {content}
             LLMMessage(role="user", content=prompt)
         )
-        llm_history = messages

         # use_tools = message.tunables.enable_tools and len(self.context.tools) > 0
         # message.metadata.tools = {
@@ -829,7 +826,7 @@ Content: {content}
         status_message = ChatMessageStatus(
             session_id=session_id,
             activity=ApiActivityType.GENERATING,
-            content=f"Generating response..."
+            content="Generating response..."
         )
         yield status_message
diff --git a/src/backend/agents/candidate_chat.py b/src/backend/agents/candidate_chat.py
index 41faaf4..9d41307 100644
--- a/src/backend/agents/candidate_chat.py
+++ b/src/backend/agents/candidate_chat.py
@@ -8,7 +8,7 @@ from .registry import agent_registry
 from models import (
     ApiMessage, Tunables, ApiStatusType)

-system_message = f"""
+system_message = """
 When answering queries, follow these steps:

 - When any content from <|context|> is relevant, synthesize information from all sources to provide the most complete answer.
diff --git a/src/backend/agents/generate_image.py b/src/backend/agents/generate_image.py
index 2f58091..ac34f9f 100644
--- a/src/backend/agents/generate_image.py
+++ b/src/backend/agents/generate_image.py
@@ -1,12 +1,9 @@
 from __future__ import annotations
 from typing import (
-    Dict,
     Literal,
     ClassVar,
-    cast,
     Any,
     AsyncGenerator,
-    List,
     Optional
     # override
 ) # NOTE: You must import Optional for late binding to work
diff --git a/src/backend/agents/generate_persona.py b/src/backend/agents/generate_persona.py
index 0cf2684..042c393 100644
--- a/src/backend/agents/generate_persona.py
+++ b/src/backend/agents/generate_persona.py
@@ -1,10 +1,9 @@
 from __future__ import annotations
-from pydantic import model_validator, Field, BaseModel # type: ignore
+from pydantic import Field # type: ignore
 from typing import (
     Dict,
     Literal,
     ClassVar,
-    cast,
     Any,
     Tuple,
     AsyncGenerator,
@@ -19,7 +18,6 @@ import time
 import time
 import os
 import random
-from names_dataset import NameDataset, NameWrapper # type: ignore

 from .base import Agent, agent_registry
 from models import ApiActivityType, ChatMessage, ChatMessageError, ApiMessageType, ChatMessageStatus, ChatMessageStreaming, ApiStatusType, Tunables
@@ -339,7 +337,7 @@ Incorporate the following into the job description: {original_prompt}
         #
         # Generate the persona
         #
-        logger.info(f"šŸ¤– Generating persona...")
+        logger.info("šŸ¤– Generating persona...")
         generating_message = None
         async for generating_message in self.llm_one_shot(
             llm=llm, model=model,
diff --git a/src/backend/agents/generate_resume.py b/src/backend/agents/generate_resume.py
index 213c3f4..14a571d 100644
--- a/src/backend/agents/generate_resume.py
+++ b/src/backend/agents/generate_resume.py
@@ -1,17 +1,13 @@
 from __future__ import annotations
-from pydantic import model_validator, Field # type: ignore
 from typing import (
-    Dict,
     Literal,
     ClassVar,
     Any,
     AsyncGenerator,
-    List,
-    Optional
+    List
     # override
 ) # NOTE: You must import Optional for late binding to work
 import json
-import numpy as np # type: ignore

 from logger import logger
 from .base import Agent, agent_registry
@@ -172,7 +168,7 @@ Format it in clean, ATS-friendly markdown. Provide ONLY the resume with no comme
         # Stage 1A: Analyze job requirements
         status_message = ChatMessageStatus(
             session_id=session_id,
-            content = f"Analyzing job requirements",
+            content = "Analyzing job requirements",
             activity=ApiActivityType.THINKING
         )
         yield status_message
@@ -215,7 +211,7 @@ Format it in clean, ATS-friendly markdown. Provide ONLY the resume with no comme
             system_prompt=system_prompt,
         )
         yield resume_message
-        logger.info(f"āœ… Resume generation completed successfully.")
+        logger.info("āœ… Resume generation completed successfully.")
         return

 # Register the base agent
diff --git a/src/backend/agents/job_requirements.py b/src/backend/agents/job_requirements.py
index 6044af9..b3e7326 100644
--- a/src/backend/agents/job_requirements.py
+++ b/src/backend/agents/job_requirements.py
@@ -1,18 +1,15 @@
 from __future__ import annotations
-from pydantic import model_validator, Field # type: ignore
 from typing import (
     Dict,
     Literal,
     ClassVar,
     Any,
     AsyncGenerator,
-    List,
     Optional
     # override
 ) # NOTE: You must import Optional for late binding to work
 import inspect
 import json
-import numpy as np # type: ignore

 from .base import Agent, agent_registry
 from models import ApiActivityType, ApiMessage, ChatMessage, ChatMessageError, ChatMessageStatus, ChatMessageStreaming, ApiStatusType, Job, JobRequirements, JobRequirementsMessage, Tunables
@@ -163,7 +160,7 @@ Avoid vague categorizations and be precise about whether skills are explicitly r
         # Stage 1A: Analyze job requirements
         status_message = ChatMessageStatus(
             session_id=session_id,
-            content = f"Analyzing job requirements",
+            content = "Analyzing job requirements",
             activity=ApiActivityType.THINKING
         )
         yield status_message
@@ -238,7 +235,7 @@ Avoid vague categorizations and be precise about whether skills are explicitly r
             job=job,
         )
         yield job_requirements_message
-        logger.info(f"āœ… Job requirements analysis completed successfully.")
+        logger.info("āœ… Job requirements analysis completed successfully.")
         return

 # Register the base agent
diff --git a/src/backend/agents/skill_match.py b/src/backend/agents/skill_match.py
index 0a21c05..ccb8e1b 100644
--- a/src/backend/agents/skill_match.py
+++ b/src/backend/agents/skill_match.py
@@ -1,17 +1,13 @@
 from __future__ import annotations
-from pydantic import model_validator, Field # type: ignore
 from typing import (
-    Dict,
     Literal,
     ClassVar,
     Any,
     AsyncGenerator,
-    List,
     Optional
     # override
 ) # NOTE: You must import Optional for late binding to work
 import json
-import numpy as np # type: ignore

 from .base import Agent, agent_registry
 from models import (ApiMessage, ChatMessage, ChatMessageError, ChatMessageRagSearch, ChatMessageSkillAssessment, ApiStatusType, EvidenceDetail,
@@ -233,7 +229,7 @@ JSON RESPONSE:"""
             skill_assessment=skill_assessment,
         )
         yield skill_assessment_message
-        logger.info(f"āœ… Skill assessment completed successfully.")
+        logger.info("āœ… Skill assessment completed successfully.")
         return

 # Register the base agent
diff --git a/src/backend/database/manager.py b/src/backend/database/manager.py
index de1bc13..2e61c70 100644
--- a/src/backend/database/manager.py
+++ b/src/backend/database/manager.py
@@ -5,10 +5,6 @@ import logging
 import os
 from datetime import datetime, UTC
 import asyncio
-from models import (
-    # User models
-    Candidate, Employer, BaseUser, EvidenceDetail, Guest, Authentication, AuthResponse, SkillAssessment,
-)
 from .core import RedisDatabase

 logger = logging.getLogger(__name__)
diff --git a/src/backend/database/mixins/auth.py b/src/backend/database/mixins/auth.py
index c8c0dd5..7655238 100644
--- a/src/backend/database/mixins/auth.py
+++ b/src/backend/database/mixins/auth.py
@@ -374,7 +374,7 @@ class AuthMixin(DatabaseProtocol):
                 token_data["is_revoked"] = True
                 token_data["revoked_at"] = datetime.now(timezone.utc).isoformat()
                 await self.redis.set(key, json.dumps(token_data, default=str))
logger.info(f"šŸ” Revoked refresh token") + logger.info("šŸ” Revoked refresh token") return True return False except Exception as e: @@ -457,7 +457,7 @@ class AuthMixin(DatabaseProtocol): token_data["used"] = True token_data["used_at"] = datetime.now(timezone.utc).isoformat() await self.redis.set(key, json.dumps(token_data, default=str)) - logger.info(f"šŸ” Marked password reset token as used") + logger.info("šŸ” Marked password reset token as used") return True return False except Exception as e: diff --git a/src/backend/device_manager.py b/src/backend/device_manager.py index 7895cb5..cfad1c6 100644 --- a/src/backend/device_manager.py +++ b/src/backend/device_manager.py @@ -13,7 +13,6 @@ class DeviceManager: def generate_device_fingerprint(self, request: Request) -> str: """Generate device fingerprint from request""" user_agent = request.headers.get("user-agent", "") - ip_address = request.client.host if request.client else "unknown" accept_language = request.headers.get("accept-language", "") # Create fingerprint diff --git a/src/backend/entities/entity_manager.py b/src/backend/entities/entity_manager.py index 0ee8836..df65be2 100644 --- a/src/backend/entities/entity_manager.py +++ b/src/backend/entities/entity_manager.py @@ -3,7 +3,7 @@ import weakref from datetime import datetime, timedelta from typing import Dict, Optional from contextlib import asynccontextmanager -from pydantic import BaseModel, Field # type: ignore +from pydantic import BaseModel # type: ignore from models import Candidate from agents.base import CandidateEntity diff --git a/src/backend/focused_test.py b/src/backend/focused_test.py index 20fa68b..ad697ea 100644 --- a/src/backend/focused_test.py +++ b/src/backend/focused_test.py @@ -23,17 +23,18 @@ def test_model_creation(): # Create candidate candidate = Candidate( email="test@example.com", + user_type=UserType.CANDIDATE, username="test_candidate", - createdAt=datetime.now(), - updatedAt=datetime.now(), + created_at=datetime.now(), + updated_at=datetime.now(), status=UserStatus.ACTIVE, - firstName="John", - lastName="Doe", - fullName="John Doe", + first_name="John", + last_name="Doe", + full_name="John Doe", skills=[skill], experience=[], education=[], - preferredJobTypes=[EmploymentType.FULL_TIME], + preferred_job_types=[EmploymentType.FULL_TIME], location=location, languages=[], certifications=[] @@ -41,18 +42,18 @@ def test_model_creation(): # Create employer employer = Employer( - firstName="Mary", - lastName="Smith", - fullName="Mary Smith", + user_type=UserType.EMPLOYER, + first_name="Mary", + last_name="Smith", + full_name="Mary Smith", email="hr@company.com", - username="test_employer", - createdAt=datetime.now(), - updatedAt=datetime.now(), + created_at=datetime.now(), + updated_at=datetime.now(), status=UserStatus.ACTIVE, - companyName="Test Company", + company_name="Test Company", industry="Technology", - companySize="50-200", - companyDescription="A test company", + company_size="50-200", + company_description="A test company", location=location ) @@ -84,8 +85,8 @@ def test_json_api_format(): assert candidate_back.first_name == candidate.first_name assert employer_back.company_name == employer.company_name - print(f"āœ… JSON round-trip successful") - print(f"āœ… Data integrity verified") + print("āœ… JSON round-trip successful") + print("āœ… Data integrity verified") return True @@ -105,8 +106,8 @@ def test_api_dict_format(): assert "createdAt" in candidate_dict assert "companyName" in employer_dict - print(f"āœ… API format dictionaries created") - 
print(f"āœ… CamelCase aliases verified") + print("āœ… API format dictionaries created") + print("āœ… CamelCase aliases verified") # Test deserializing from API format candidate_back = Candidate.model_validate(candidate_dict) @@ -115,7 +116,7 @@ def test_api_dict_format(): assert candidate_back.email == candidate.email assert employer_back.company_name == employer.company_name - print(f"āœ… API format round-trip successful") + print("āœ… API format round-trip successful") return True @@ -125,15 +126,16 @@ def test_validation_constraints(): try: # Create a candidate with invalid email - invalid_candidate = Candidate( + Candidate( + user_type=UserType.CANDIDATE, email="invalid-email", username="test_invalid", - createdAt=datetime.now(), - updatedAt=datetime.now(), + created_at=datetime.now(), + updated_at=datetime.now(), status=UserStatus.ACTIVE, - firstName="Jane", - lastName="Doe", - fullName="Jane Doe" + first_name="Jane", + last_name="Doe", + full_name="Jane Doe" ) print("āŒ Validation should have failed but didn't") return False @@ -155,7 +157,7 @@ def test_enum_values(): assert candidate_dict["userType"] == "candidate" assert employer.user_type == UserType.EMPLOYER - print(f"āœ… Enum values correctly serialized") + print("āœ… Enum values correctly serialized") print(f"āœ… User types: candidate={candidate.user_type}, employer={employer.user_type}") return True @@ -172,7 +174,7 @@ def main(): test_validation_constraints() test_enum_values() - print(f"\nšŸŽ‰ All focused tests passed!") + print("\nšŸŽ‰ All focused tests passed!") print("=" * 40) print("āœ… Models work correctly") print("āœ… JSON API format works") diff --git a/src/backend/image_generator/profile_image.py b/src/backend/image_generator/profile_image.py index 699cd31..a813848 100644 --- a/src/backend/image_generator/profile_image.py +++ b/src/backend/image_generator/profile_image.py @@ -48,7 +48,7 @@ def flux_worker(pipe: Any, params: ImageRequest, status_queue: queue.Queue, task # Flux: Run generation in the background and yield progress updates status_queue.put(ChatMessageStatus( session_id=params.session_id, - content=f"Initializing image generation.", + content="Initializing image generation.", activity=ApiActivityType.GENERATING_IMAGE, )) @@ -224,7 +224,7 @@ async def generate_image(request: ImageRequest) -> AsyncGenerator[ChatMessage, N return filedir = os.path.dirname(request.filepath) - filename = os.path.basename(request.filepath) + os.path.basename(request.filepath) os.makedirs(filedir, exist_ok=True) model_type = "flux" @@ -233,11 +233,11 @@ async def generate_image(request: ImageRequest) -> AsyncGenerator[ChatMessage, N # Get initial time estimate, scaled by resolution estimates = TIME_ESTIMATES[model_type][device] resolution_scale = (request.height * request.width) / (512 * 512) - estimated_total = estimates["load"] + estimates["per_step"] * request.iterations * resolution_scale + estimates["load"] + estimates["per_step"] * request.iterations * resolution_scale # Initialize or get cached pipeline start_time = time.time() - yield status(session_id, f"Loading generative image model...") + yield status(session_id, "Loading generative image model...") pipe = await model_cache.get_pipeline(request.model, device) load_time = time.time() - start_time yield status(session_id, f"Model loaded in {load_time:.1f} seconds.",) diff --git a/src/backend/models.py b/src/backend/models.py index 494c10a..9403a2b 100644 --- a/src/backend/models.py +++ b/src/backend/models.py @@ -289,7 +289,7 @@ class MFAData(BaseModel): class 
 class MFARequestResponse(BaseModel):
     mfa_required: bool = Field(..., alias=str("mfaRequired"))
-    mfa_data: Optional[MFAData] = Field(None, alias=str("mfaData"))
+    mfa_data: Optional[MFAData] = Field(default=None, alias=str("mfaData"))
     model_config = ConfigDict(populate_by_name=True)

 class ResendVerificationRequest(BaseModel):
@@ -312,11 +312,11 @@ class Location(BaseModel):
     city: str
     state: Optional[str] = None
     country: str
-    postal_code: Optional[str] = Field(None, alias=str("postalCode"))
+    postal_code: Optional[str] = Field(default=None, alias=str("postalCode"))
     latitude: Optional[float] = None
     longitude: Optional[float] = None
     remote: Optional[bool] = None
-    hybrid_options: Optional[List[str]] = Field(None, alias=str("hybridOptions"))
+    hybrid_options: Optional[List[str]] = Field(default=None, alias=str("hybridOptions"))
     address: Optional[str] = None

 class Skill(BaseModel):
@@ -324,14 +324,14 @@
     name: str
     category: str
     level: SkillLevel
-    years_of_experience: Optional[int] = Field(None, alias=str("yearsOfExperience"))
+    years_of_experience: Optional[int] = Field(default=None, alias=str("yearsOfExperience"))

 class WorkExperience(BaseModel):
     id: str = Field(default_factory=lambda: str(uuid.uuid4()))
     company_name: str = Field(..., alias=str("companyName"))
     position: str
     start_date: datetime = Field(..., alias=str("startDate"))
-    end_date: Optional[datetime] = Field(None, alias=str("endDate"))
+    end_date: Optional[datetime] = Field(default=None, alias=str("endDate"))
     is_current: bool = Field(..., alias=str("isCurrent"))
     description: str
     skills: List[str]
@@ -344,7 +344,7 @@ class Education(BaseModel):
     degree: str
     field_of_study: str = Field(..., alias=str("fieldOfStudy"))
     start_date: datetime = Field(..., alias=str("startDate"))
-    end_date: Optional[datetime] = Field(None, alias=str("endDate"))
+    end_date: Optional[datetime] = Field(default=None, alias=str("endDate"))
     is_current: bool = Field(..., alias=str("isCurrent"))
     gpa: Optional[float] = None
     achievements: Optional[List[str]] = None
@@ -359,9 +359,9 @@ class Certification(BaseModel):
     name: str
     issuing_organization: str = Field(..., alias=str("issuingOrganization"))
     issue_date: datetime = Field(..., alias=str("issueDate"))
-    expiration_date: Optional[datetime] = Field(None, alias=str("expirationDate"))
-    credential_id: Optional[str] = Field(None, alias=str("credentialId"))
-    credential_url: Optional[HttpUrl] = Field(None, alias=str("credentialUrl"))
+    expiration_date: Optional[datetime] = Field(default=None, alias=str("expirationDate"))
+    credential_id: Optional[str] = Field(default=None, alias=str("credentialId"))
+    credential_url: Optional[HttpUrl] = Field(default=None, alias=str("credentialUrl"))

 class SocialLink(BaseModel):
     platform: SocialPlatform
@@ -391,7 +391,7 @@ class RefreshToken(BaseModel):
     device: str
     ip_address: str = Field(..., alias=str("ipAddress"))
     is_revoked: bool = Field(..., alias=str("isRevoked"))
-    revoked_reason: Optional[str] = Field(None, alias=str("revokedReason"))
+    revoked_reason: Optional[str] = Field(default=None, alias=str("revokedReason"))

 class Attachment(BaseModel):
     id: str = Field(default_factory=lambda: str(uuid.uuid4()))
@@ -401,8 +401,8 @@ class Attachment(BaseModel):
     file_url: str = Field(..., alias=str("fileUrl"))
     uploaded_at: datetime = Field(..., alias=str("uploadedAt"))
     is_processed: bool = Field(..., alias=str("isProcessed"))
-    processing_result: Optional[Any] = Field(None, alias=str("processingResult"))
alias=str("thumbnailUrl")) + processing_result: Optional[Any] = Field(default=None, alias=str("processingResult")) + thumbnail_url: Optional[str] = Field(default=None, alias=str("thumbnailUrl")) class MessageReaction(BaseModel): user_id: str = Field(..., alias=str("userId")) @@ -438,22 +438,22 @@ class AccessibilitySettings(BaseModel): high_contrast: bool = Field(..., alias=str("highContrast")) reduce_motion: bool = Field(..., alias=str("reduceMotion")) screen_reader: bool = Field(..., alias=str("screenReader")) - color_blind_mode: Optional[ColorBlindMode] = Field(None, alias=str("colorBlindMode")) + color_blind_mode: Optional[ColorBlindMode] = Field(default=None, alias=str("colorBlindMode")) class ProcessingStep(BaseModel): id: str = Field(default_factory=lambda: str(uuid.uuid4())) type: ProcessingStepType parameters: Dict[str, Any] order: int - depends_on: Optional[List[str]] = Field(None, alias=str("dependsOn")) + depends_on: Optional[List[str]] = Field(default=None, alias=str("dependsOn")) class RetrievalParameters(BaseModel): search_type: SearchType = Field(..., alias=str("searchType")) top_k: int = Field(..., alias=str("topK")) - similarity_threshold: Optional[float] = Field(None, alias=str("similarityThreshold")) - reranker_model: Optional[str] = Field(None, alias=str("rerankerModel")) + similarity_threshold: Optional[float] = Field(default=None, alias=str("similarityThreshold")) + reranker_model: Optional[str] = Field(default=None, alias=str("rerankerModel")) use_keyword_boost: bool = Field(..., alias=str("useKeywordBoost")) - filter_options: Optional[Dict[str, Any]] = Field(None, alias=str("filterOptions")) + filter_options: Optional[Dict[str, Any]] = Field(default=None, alias=str("filterOptions")) context_window: int = Field(..., alias=str("contextWindow")) class ErrorDetail(BaseModel): @@ -482,8 +482,8 @@ class BaseUser(BaseUserWithType): location: Optional[Location] = None created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("createdAt")) updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("updatedAt")) - last_login: Optional[datetime] = Field(None, alias=str("lastLogin")) - profile_image: Optional[str] = Field(None, alias=str("profileImage")) + last_login: Optional[datetime] = Field(default=None, alias=str("lastLogin")) + profile_image: Optional[str] = Field(default=None, alias=str("profileImage")) status: UserStatus is_admin: bool = Field(default=False, alias=str("isAdmin")) @@ -500,8 +500,8 @@ class RagContentMetadata(BaseModel): line_begin: int = Field(..., alias=str("lineBegin")) line_end: int = Field(..., alias=str("lineEnd")) lines: int - chunk_begin: Optional[int] = Field(None, alias=str("chunkBegin")) - chunk_end: Optional[int] = Field(None, alias=str("chunkEnd")) + chunk_begin: Optional[int] = Field(default=None, alias=str("chunkBegin")) + chunk_end: Optional[int] = Field(default=None, alias=str("chunkEnd")) metadata: Dict[str, Any] = Field(default_factory=dict) model_config = ConfigDict(populate_by_name=True) @@ -566,13 +566,13 @@ class Candidate(BaseUser): experience: Optional[List[WorkExperience]] = None questions: Optional[List[CandidateQuestion]] = None education: Optional[List[Education]] = None - preferred_job_types: Optional[List[EmploymentType]] = Field(None, alias=str("preferredJobTypes")) - desired_salary: Optional[DesiredSalary] = Field(None, alias=str("desiredSalary")) - availability_date: Optional[datetime] = Field(None, alias=str("availabilityDate")) + preferred_job_types: 
+    preferred_job_types: Optional[List[EmploymentType]] = Field(default=None, alias=str("preferredJobTypes"))
+    desired_salary: Optional[DesiredSalary] = Field(default=None, alias=str("desiredSalary"))
+    availability_date: Optional[datetime] = Field(default=None, alias=str("availabilityDate"))
     summary: Optional[str] = None
     languages: Optional[List[Language]] = None
     certifications: Optional[List[Certification]] = None
-    job_applications: Optional[List["JobApplication"]] = Field(None, alias=str("jobApplications"))
+    job_applications: Optional[List["JobApplication"]] = Field(default=None, alias=str("jobApplications"))
     rags: List[RagEntry] = Field(default_factory=list)
     rag_content_size : int = 0
     is_public: bool = Field(default=True, alias=str("isPublic"))
@@ -591,20 +591,20 @@ class Employer(BaseUser):
     description: Optional[str] = None
     company_size: str = Field(..., alias=str("companySize"))
     company_description: str = Field(..., alias=str("companyDescription"))
-    website_url: Optional[HttpUrl] = Field(None, alias=str("websiteUrl"))
+    website_url: Optional[HttpUrl] = Field(default=None, alias=str("websiteUrl"))
     jobs: Optional[List["Job"]] = None
-    company_logo: Optional[str] = Field(None, alias=str("companyLogo"))
-    social_links: Optional[List[SocialLink]] = Field(None, alias=str("socialLinks"))
+    company_logo: Optional[str] = Field(default=None, alias=str("companyLogo"))
+    social_links: Optional[List[SocialLink]] = Field(default=None, alias=str("socialLinks"))
     poc: Optional[PointOfContact] = None

 class Guest(BaseUser):
     user_type: UserType = Field(UserType.GUEST, alias=str("userType"))
     session_id: str = Field(..., alias=str("sessionId"))
     username: str # Add username for consistency with other user types
-    converted_to_user_id: Optional[str] = Field(None, alias=str("convertedToUserId"))
-    ip_address: Optional[str] = Field(None, alias=str("ipAddress"))
+    converted_to_user_id: Optional[str] = Field(default=None, alias=str("convertedToUserId"))
+    ip_address: Optional[str] = Field(default=None, alias=str("ipAddress"))
     created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("createdAt"))
-    user_agent: Optional[str] = Field(None, alias=str("userAgent"))
+    user_agent: Optional[str] = Field(default=None, alias=str("userAgent"))
     rag_content_size: int = 0
     is_public: bool = Field(default=False, alias=str("isPublic"))
     model_config = ConfigDict(populate_by_name=True, use_enum_values=True)
@@ -614,14 +614,14 @@ class Authentication(BaseModel):
     password_hash: str = Field(..., alias=str("passwordHash"))
     salt: str
     refresh_tokens: List[RefreshToken] = Field(..., alias=str("refreshTokens"))
-    reset_password_token: Optional[str] = Field(None, alias=str("resetPasswordToken"))
-    reset_password_expiry: Optional[datetime] = Field(None, alias=str("resetPasswordExpiry"))
+    reset_password_token: Optional[str] = Field(default=None, alias=str("resetPasswordToken"))
+    reset_password_expiry: Optional[datetime] = Field(default=None, alias=str("resetPasswordExpiry"))
     last_password_change: datetime = Field(..., alias=str("lastPasswordChange"))
     mfa_enabled: bool = Field(..., alias=str("mfaEnabled"))
-    mfa_method: Optional[MFAMethod] = Field(None, alias=str("mfaMethod"))
-    mfa_secret: Optional[str] = Field(None, alias=str("mfaSecret"))
+    mfa_method: Optional[MFAMethod] = Field(default=None, alias=str("mfaMethod"))
+    mfa_secret: Optional[str] = Field(default=None, alias=str("mfaSecret"))
     login_attempts: int = Field(..., alias=str("loginAttempts"))
-    locked_until: Optional[datetime] = Field(None, alias=str("lockedUntil"))
+    locked_until: Optional[datetime] = Field(default=None, alias=str("lockedUntil"))
     model_config = ConfigDict(populate_by_name=True)

 class AuthResponse(BaseModel):
@@ -655,23 +655,23 @@ class JobRequirements(BaseModel):
     experience: Optional[List[str]] = []
     education: Optional[List[str]] = []
     certifications: Optional[List[str]] = []
-    preferred_attributes: Optional[List[str]] = Field(None, alias=str("preferredAttributes"))
-    company_values: Optional[List[str]] = Field(None, alias=str("companyValues"))
+    preferred_attributes: Optional[List[str]] = Field(default=None, alias=str("preferredAttributes"))
+    company_values: Optional[List[str]] = Field(default=None, alias=str("companyValues"))
     model_config = ConfigDict(populate_by_name=True)

 class JobDetails(BaseModel):
     location: Location
-    salary_range: Optional[SalaryRange] = Field(None, alias=str("salaryRange"))
+    salary_range: Optional[SalaryRange] = Field(default=None, alias=str("salaryRange"))
     employment_type: EmploymentType = Field(..., alias=str("employmentType"))
     date_posted: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("datePosted"))
-    application_deadline: Optional[datetime] = Field(None, alias=str("applicationDeadline"))
+    application_deadline: Optional[datetime] = Field(default=None, alias=str("applicationDeadline"))
     is_active: bool = Field(..., alias=str("isActive"))
     applicants: Optional[List["JobApplication"]] = None
     department: Optional[str] = None
-    reports_to: Optional[str] = Field(None, alias=str("reportsTo"))
+    reports_to: Optional[str] = Field(default=None, alias=str("reportsTo"))
     benefits: Optional[List[str]] = None
-    visa_sponsorship: Optional[bool] = Field(None, alias=str("visaSponsorship"))
-    featured_until: Optional[datetime] = Field(None, alias=str("featuredUntil"))
+    visa_sponsorship: Optional[bool] = Field(default=None, alias=str("visaSponsorship"))
+    featured_until: Optional[datetime] = Field(default=None, alias=str("featuredUntil"))
     views: int = 0
     application_count: int = Field(0, alias=str("applicationCount"))
@@ -704,7 +704,7 @@ class InterviewFeedback(BaseModel):
     created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("createdAt"))
     updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("updatedAt"))
     is_visible: bool = Field(..., alias=str("isVisible"))
-    skill_assessments: Optional[List[SkillAssessment]] = Field(None, alias=str("skillAssessments"))
+    skill_assessments: Optional[List[SkillAssessment]] = Field(default=None, alias=str("skillAssessments"))
     model_config = ConfigDict(populate_by_name=True)

 class InterviewSchedule(BaseModel):
@@ -718,7 +718,7 @@ class InterviewSchedule(BaseModel):
     notes: Optional[str] = None
     feedback: Optional[InterviewFeedback] = None
     status: Literal["scheduled", "completed", "cancelled", "rescheduled"]
-    meeting_link: Optional[HttpUrl] = Field(None, alias=str("meetingLink"))
+    meeting_link: Optional[HttpUrl] = Field(default=None, alias=str("meetingLink"))
     model_config = ConfigDict(populate_by_name=True)

 class JobApplication(BaseModel):
@@ -729,11 +729,11 @@ class JobApplication(BaseModel):
     applied_date: datetime = Field(..., alias=str("appliedDate"))
     updated_date: datetime = Field(..., alias=str("updatedDate"))
     resume_version: str = Field(..., alias=str("resumeVersion"))
-    cover_letter: Optional[str] = Field(None, alias=str("coverLetter"))
+    cover_letter: Optional[str] = Field(default=None, alias=str("coverLetter"))
     notes: Optional[str] = None
alias=str("interviewSchedules")) - custom_questions: Optional[List[CustomQuestion]] = Field(None, alias=str("customQuestions")) - candidate_contact: Optional[CandidateContact] = Field(None, alias=str("candidateContact")) + interview_schedules: Optional[List[InterviewSchedule]] = Field(default=None, alias=str("interviewSchedules")) + custom_questions: Optional[List[CustomQuestion]] = Field(default=None, alias=str("customQuestions")) + candidate_contact: Optional[CandidateContact] = Field(default=None, alias=str("candidateContact")) decision: Optional[ApplicationDecision] = None model_config = ConfigDict(populate_by_name=True) @@ -749,8 +749,8 @@ class GuestSessionResponse(BaseModel): class ChatContext(BaseModel): type: ChatContextType - related_entity_id: Optional[str] = Field(None, alias=str("relatedEntityId")) - related_entity_type: Optional[Literal["job", "candidate", "employer"]] = Field(None, alias=str("relatedEntityType")) + related_entity_id: Optional[str] = Field(default=None, alias=str("relatedEntityId")) + related_entity_type: Optional[Literal["job", "candidate", "employer"]] = Field(default=None, alias=str("relatedEntityType")) additional_context: Optional[Dict[str, Any]] = Field({}, alias=str("additionalContext")) model_config = ConfigDict(populate_by_name=True) @@ -774,7 +774,7 @@ class RateLimitResult(BaseModel): """Result of rate limit check""" allowed: bool reason: Optional[str] = None - retry_after_seconds: Optional[int] = Field(None, alias=str("retryAfterSeconds")) + retry_after_seconds: Optional[int] = Field(default=None, alias=str("retryAfterSeconds")) remaining_requests: Dict[str, int] = Field(default_factory=dict, alias=str("remainingRequests")) reset_times: Dict[str, datetime] = Field(default_factory=dict, alias=str("resetTimes")) model_config = ConfigDict(populate_by_name=True) @@ -804,11 +804,11 @@ class GuestConversionRequest(BaseModel): phone: Optional[str] = None # Employer-specific fields (optional) - company_name: Optional[str] = Field(None, alias=str("companyName")) + company_name: Optional[str] = Field(default=None, alias=str("companyName")) industry: Optional[str] = None - company_size: Optional[str] = Field(None, alias=str("companySize")) - company_description: Optional[str] = Field(None, alias=str("companyDescription")) - website_url: Optional[HttpUrl] = Field(None, alias=str("websiteUrl")) + company_size: Optional[str] = Field(default=None, alias=str("companySize")) + company_description: Optional[str] = Field(default=None, alias=str("companyDescription")) + website_url: Optional[HttpUrl] = Field(default=None, alias=str("websiteUrl")) model_config = ConfigDict(populate_by_name=True) @field_validator('username') @@ -927,7 +927,7 @@ class ChatMessage(ChatMessageUser): #attachments: Optional[List[Attachment]] = None #reactions: Optional[List[MessageReaction]] = None #is_edited: bool = Field(False, alias=str("isEdited")) - #edit_history: Optional[List[EditHistory]] = Field(None, alias=str("editHistory")) + #edit_history: Optional[List[EditHistory]] = Field(default=None, alias=str("editHistory")) class ChatMessageSkillAssessment(ChatMessageUser): role: ChatSenderType = ChatSenderType.ASSISTANT @@ -938,8 +938,8 @@ class ChatMessageResume(ChatMessageUser): role: ChatSenderType = ChatSenderType.ASSISTANT metadata: ChatMessageMetaData = Field(default=ChatMessageMetaData()) resume: str = Field(..., alias=str("resume")) - system_prompt: Optional[str] = Field(None, alias=str("systemPrompt")) - prompt: Optional[str] = Field(None, alias=str("prompt")) + system_prompt: 
+    system_prompt: Optional[str] = Field(default=None, alias=str("systemPrompt"))
+    prompt: Optional[str] = Field(default=None, alias=str("prompt"))
     model_config = ConfigDict(populate_by_name=True)

 class Resume(BaseModel):
@@ -974,15 +974,15 @@ class SystemInfo(BaseModel):

 class ChatSession(BaseModel):
     id: str = Field(default_factory=lambda: str(uuid.uuid4()))
-    user_id: Optional[str] = Field(None, alias=str("userId"))
-    guest_id: Optional[str] = Field(None, alias=str("guestId"))
+    user_id: Optional[str] = Field(default=None, alias=str("userId"))
+    guest_id: Optional[str] = Field(default=None, alias=str("guestId"))
     created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("createdAt"))
     last_activity: datetime = Field(default_factory=lambda: datetime.now(UTC), alias=str("lastActivity"))
     title: Optional[str] = None
     context: ChatContext
     # messages: Optional[List[ChatMessage]] = None
     is_archived: bool = Field(False, alias=str("isArchived"))
-    system_prompt: Optional[str] = Field(None, alias=str("systemPrompt"))
+    system_prompt: Optional[str] = Field(default=None, alias=str("systemPrompt"))
     model_config = ConfigDict(populate_by_name=True)

     @model_validator(mode="after")
@@ -998,10 +998,10 @@ class DataSourceConfiguration(BaseModel):
     source_type: DataSourceType = Field(..., alias=str("sourceType"))
     connection_details: Dict[str, Any] = Field(..., alias=str("connectionDetails"))
     processing_pipeline: List[ProcessingStep] = Field(..., alias=str("processingPipeline"))
-    refresh_schedule: Optional[str] = Field(None, alias=str("refreshSchedule"))
-    last_refreshed: Optional[datetime] = Field(None, alias=str("lastRefreshed"))
+    refresh_schedule: Optional[str] = Field(default=None, alias=str("refreshSchedule"))
+    last_refreshed: Optional[datetime] = Field(default=None, alias=str("lastRefreshed"))
     status: Literal["active", "pending", "error", "processing"]
-    error_details: Optional[str] = Field(None, alias=str("errorDetails"))
+    error_details: Optional[str] = Field(default=None, alias=str("errorDetails"))
     metadata: Optional[Dict[str, Any]] = None
     model_config = ConfigDict(populate_by_name=True)
@@ -1022,14 +1022,14 @@ class RAGConfiguration(BaseModel):

 class UserActivity(BaseModel):
     id: str = Field(default_factory=lambda: str(uuid.uuid4()))
-    user_id: Optional[str] = Field(None, alias=str("userId"))
-    guest_id: Optional[str] = Field(None, alias=str("guestId"))
+    user_id: Optional[str] = Field(default=None, alias=str("userId"))
+    guest_id: Optional[str] = Field(default=None, alias=str("guestId"))
     activity_type: ActivityType = Field(..., alias=str("activityType"))
     timestamp: datetime
     metadata: Dict[str, Any]
-    ip_address: Optional[str] = Field(None, alias=str("ipAddress"))
-    user_agent: Optional[str] = Field(None, alias=str("userAgent"))
-    session_id: Optional[str] = Field(None, alias=str("sessionId"))
+    ip_address: Optional[str] = Field(default=None, alias=str("ipAddress"))
+    user_agent: Optional[str] = Field(default=None, alias=str("userAgent"))
+    session_id: Optional[str] = Field(default=None, alias=str("sessionId"))
     model_config = ConfigDict(populate_by_name=True)

     @model_validator(mode="after")
@@ -1054,7 +1054,7 @@ class UserPreference(BaseModel):
     theme: ThemePreference
     notifications: List[NotificationPreference]
     accessibility: AccessibilitySettings
-    dashboard_layout: Optional[Dict[str, Any]] = Field(None, alias=str("dashboardLayout"))
+    dashboard_layout: Optional[Dict[str, Any]] = Field(default=None, alias=str("dashboardLayout"))
     language: str
     timezone: str
"daily", "weekly", "never"] = Field(..., alias=str("emailFrequency")) @@ -1096,7 +1096,7 @@ class CreateEmployerRequest(BaseModel): company_size: str = Field(..., alias=str("companySize")) company_description: str = Field(..., alias=str("companyDescription")) # Add other required employer fields - website_url: Optional[str] = Field(None, alias=str("websiteUrl")) + website_url: Optional[str] = Field(default=None, alias=str("websiteUrl")) phone: Optional[str] = None model_config = ConfigDict(populate_by_name=True) @@ -1116,14 +1116,14 @@ class CreateEmployerRequest(BaseModel): class ChatQuery(BaseModel): prompt: str tunables: Optional[Tunables] = None - agent_options: Optional[Dict[str, Any]] = Field(None, alias=str("agentOptions")) + agent_options: Optional[Dict[str, Any]] = Field(default=None, alias=str("agentOptions")) model_config = ConfigDict(populate_by_name=True) class PaginatedRequest(BaseModel): page: Annotated[int, Field(ge=1)] = 1 limit: Annotated[int, Field(ge=1, le=100)] = 20 - sort_by: Optional[str] = Field(None, alias=str("sortBy")) - sort_order: Optional[SortOrder] = Field(None, alias=str("sortOrder")) + sort_by: Optional[str] = Field(default=None, alias=str("sortBy")) + sort_order: Optional[SortOrder] = Field(default=None, alias=str("sortOrder")) filters: Optional[Dict[str, Any]] = None model_config = ConfigDict(populate_by_name=True) @@ -1132,8 +1132,8 @@ class SearchQuery(BaseModel): filters: Optional[Dict[str, Any]] = None page: Annotated[int, Field(ge=1)] = 1 limit: Annotated[int, Field(ge=1, le=100)] = 20 - sort_by: Optional[str] = Field(None, alias=str("sortBy")) - sort_order: Optional[SortOrder] = Field(None, alias=str("sortOrder")) + sort_by: Optional[str] = Field(default=None, alias=str("sortBy")) + sort_order: Optional[SortOrder] = Field(default=None, alias=str("sortOrder")) model_config = ConfigDict(populate_by_name=True) class PaginatedResponse(BaseModel): diff --git a/src/backend/rag/rag.py b/src/backend/rag/rag.py index 4f21272..05fe91d 100644 --- a/src/backend/rag/rag.py +++ b/src/backend/rag/rag.py @@ -1,5 +1,5 @@ from __future__ import annotations -from pydantic import BaseModel, field_serializer, field_validator, model_validator, Field # type: ignore +from pydantic import BaseModel # type: ignore from typing import List, Optional, Dict, Any import os import glob diff --git a/src/backend/routes/admin.py b/src/backend/routes/admin.py index 52b13a8..e26b169 100644 --- a/src/backend/routes/admin.py +++ b/src/backend/routes/admin.py @@ -136,7 +136,7 @@ async def get_system_health( if database_manager: try: - database = database_manager.get_database() + database_manager.get_database() from database.manager import redis_manager redis_health = await redis_manager.health_check() db_health = { diff --git a/src/backend/routes/auth.py b/src/backend/routes/auth.py index 312d4f4..6efd667 100644 --- a/src/backend/routes/auth.py +++ b/src/backend/routes/auth.py @@ -676,7 +676,7 @@ async def request_mfa( # Check if device is trusted device_manager = DeviceManager(database) - device_info = device_manager.parse_device_info(http_request) + device_manager.parse_device_info(http_request) is_trusted = await device_manager.is_trusted_device(user_data["id"], request.device_id) diff --git a/src/backend/routes/candidates.py b/src/backend/routes/candidates.py index 985464e..4ec9d83 100644 --- a/src/backend/routes/candidates.py +++ b/src/backend/routes/candidates.py @@ -59,7 +59,7 @@ async def create_candidate_ai( prometheus_collector=prometheus_collector) if not generate_agent: - 
logger.warning(f"āš ļø Unable to create AI generation agent.") + logger.warning("āš ļø Unable to create AI generation agent.") return JSONResponse( status_code=400, content=create_error_response("AGENT_NOT_FOUND", "Unable to create AI generation agent") @@ -91,7 +91,7 @@ async def create_candidate_ai( resume_message = generated_message if not persona_message: - logger.error(f"āŒ AI generation failed: No message generated") + logger.error("āŒ AI generation failed: No message generated") return JSONResponse( status_code=500, content=create_error_response("AI_GENERATION_FAILED", "Failed to generate AI candidate data") @@ -124,7 +124,7 @@ async def create_candidate_ai( }) candidate = CandidateAI.model_validate(candidate_data) except ValidationError as e: - logger.error(f"āŒ AI candidate data validation failed") + logger.error("āŒ AI candidate data validation failed") for lines in backstory_traceback.format_exc().splitlines(): logger.error(lines) logger.error(json.dumps(persona_message.content, indent=2)) @@ -134,7 +134,7 @@ async def create_candidate_ai( status_code=400, content=create_error_response("AI_VALIDATION_FAILED", "AI-generated data validation failed") ) - except Exception as e: + except Exception: # Log the error and return a validation error response for lines in backstory_traceback.format_exc().splitlines(): logger.error(lines) @@ -165,7 +165,7 @@ async def create_candidate_ai( document_id = str(uuid.uuid4()) document_type = DocumentType.MARKDOWN document_content = resume_message.content.encode('utf-8') - document_filename = f"resume.md" + document_filename = "resume.md" document_data = Document( id=document_id, @@ -313,7 +313,7 @@ async def upload_candidate_document( # Parse the JSON string and create DocumentOptions object options_dict = json.loads(options_data) options : DocumentOptions = DocumentOptions.model_validate(options_dict) - except (json.JSONDecodeError, ValidationError) as e: + except (json.JSONDecodeError, ValidationError): return StreamingResponse( iter([json.dumps(ChatMessageError( session_id=MOCK_UUID, # No session ID for document uploads @@ -733,7 +733,7 @@ async def get_document_content( file_path = os.path.join(defines.user_dir, candidate.username, "rag-content" if document.options.include_in_rag else "files", document.original_name) file_path = pathlib.Path(file_path) - if not document.type in [DocumentType.TXT, DocumentType.MARKDOWN]: + if document.type not in [DocumentType.TXT, DocumentType.MARKDOWN]: file_path = file_path.with_suffix('.md') if not file_path.exists(): @@ -754,7 +754,7 @@ async def get_document_content( content=content, size=document.size ) - return create_success_response(response.model_dump(by_alias=True)); + return create_success_response(response.model_dump(by_alias=True)) except Exception as e: logger.error(f"āŒ Failed to read document file: {e}") @@ -820,7 +820,7 @@ async def update_document( dst = pathlib.Path(rag_path) # Move to RAG directory src.rename(dst) - logger.info(f"šŸ“ Moved file to RAG directory") + logger.info("šŸ“ Moved file to RAG directory") if document.type != DocumentType.MARKDOWN and document.type != DocumentType.TXT: src = pathlib.Path(file_path) src_as_md = src.with_suffix(".md") @@ -832,7 +832,7 @@ async def update_document( dst = pathlib.Path(file_path) # Move to regular files directory src.rename(dst) - logger.info(f"šŸ“ Moved file to regular files directory") + logger.info("šŸ“ Moved file to regular files directory") if document.type != DocumentType.MARKDOWN and document.type != DocumentType.TXT: 
                     src_as_md = src.with_suffix(".md")
                     if src_as_md.exists():
@@ -1347,7 +1347,7 @@ async def get_candidate(
         all_candidates_data = await database.get_all_candidates()
         if not all_candidates_data:
-            logger.warning(f"āš ļø No candidates found in database")
+            logger.warning("āš ļø No candidates found in database")
             return JSONResponse(
                 status_code=404,
                 content=create_error_response("NOT_FOUND", "No candidates found")
             )
@@ -1474,7 +1474,7 @@ async def get_candidate_skill_match(
         if not agent:
             error_message = ChatMessageError(
                 session_id=MOCK_UUID, # No session ID for document uploads
-                content=f"No skill match agent found for this candidate"
+                content="No skill match agent found for this candidate"
             )
             yield error_message
             return
@@ -1517,7 +1517,7 @@
         if final_message is None:
             error_message = ChatMessageError(
                 session_id=MOCK_UUID, # No session ID for document uploads
-                content=f"No match found for the given skill"
+                content="No match found for the given skill"
             )
             yield error_message
             return
@@ -1525,7 +1525,7 @@
         if not isinstance(final_message, ChatMessageSkillAssessment):
             error_message = ChatMessageError(
                 session_id=MOCK_UUID, # No session ID for document uploads
-                content=f"Skill match response is not valid"
+                content="Skill match response is not valid"
             )
             yield error_message
             return
@@ -1535,7 +1535,7 @@
         if not assessment:
             error_message = ChatMessageError(
                 session_id=MOCK_UUID, # No session ID for document uploads
-                content=f"Skill assessment could not be generated"
+                content="Skill assessment could not be generated"
             )
             yield error_message
             return
@@ -1772,7 +1772,7 @@
             logger.error("āŒ Uninitialized skill match data, cannot generate resume")
             error_message = ChatMessageError(
                 session_id=MOCK_UUID, # No session ID for document uploads
-                content=f"Uninitialized skill match data, cannot generate resume"
+                content="Uninitialized skill match data, cannot generate resume"
             )
             yield error_message
             return
@@ -1784,7 +1784,7 @@
         if not agent:
             error_message = ChatMessageError(
                 session_id=MOCK_UUID, # No session ID for document uploads
-                content=f"No skill match agent found for this candidate"
+                content="No skill match agent found for this candidate"
             )
             yield error_message
             return
@@ -1793,7 +1793,7 @@
         if not isinstance(agent, GenerateResume):
             error_message = ChatMessageError(
                 session_id=MOCK_UUID, # No session ID for document uploads
-                content=f"Agent is not a GenerateResume instance"
+                content="Agent is not a GenerateResume instance"
             )
             yield error_message
             return
@@ -1829,7 +1829,7 @@
         if final_message is None:
             error_message = ChatMessageError(
                 session_id=MOCK_UUID, # No session ID for document uploads
-                content=f"No skill match found for the given requirement"
+                content="No skill match found for the given requirement"
             )
             yield error_message
             return
@@ -1837,7 +1837,7 @@
         if not isinstance(final_message, ChatMessageResume):
             error_message = ChatMessageError(
                 session_id=MOCK_UUID, # No session ID for document uploads
-                content=f"Skill match response is not valid"
+                content="Skill match response is not valid"
             )
             yield error_message
             return
diff --git a/src/backend/routes/chat.py b/src/backend/routes/chat.py
index da7f220..b152833 100644
--- a/src/backend/routes/chat.py
+++ b/src/backend/routes/chat.py
@@ -29,36 +29,7 @@ import backstory_traceback
 import entities.entity_manager as entities
 from models import (
-    LoginRequest, CreateCandidateRequest, CreateEmployerRequest,
-    Candidate, Employer, Guest, AuthResponse,
-    MFARequest, MFAData, MFARequestResponse, MFAVerifyRequest,
-    EmailVerificationRequest, ResendVerificationRequest,
-    # API
-    MOCK_UUID, ApiActivityType, ChatMessageError, ChatMessageResume,
-    ChatMessageSkillAssessment, ChatMessageStatus, ChatMessageStreaming,
-    ChatMessageUser, DocumentMessage, DocumentOptions, Job,
-    JobRequirements, JobRequirementsMessage, LoginRequest,
-    CreateCandidateRequest, CreateEmployerRequest,
-
-    # User models
-    Candidate, Employer, BaseUserWithType, BaseUser, Guest,
-    Authentication, AuthResponse, CandidateAI,
-
-    # Job models
-    JobApplication, ApplicationStatus,
-
-    # Chat models
-    ChatSession, ChatMessage, ChatContext, ChatQuery, ApiStatusType, ChatSenderType, ApiMessageType, ChatContextType,
-    ChatMessageRagSearch,
-
-    # Document models
-    Document, DocumentType, DocumentListResponse, DocumentUpdateRequest, DocumentContentResponse,
-
-    # Supporting models
-    Location, MFARequest, MFAData, MFARequestResponse, MFAVerifyRequest, RagContentMetadata, RagContentResponse, ResendVerificationRequest, Resume, ResumeMessage, Skill, SkillAssessment, SystemInfo, UserType, WorkExperience, Education,
-
-    # Email
-    EmailVerificationRequest
+    Candidate, ChatMessageUser, Candidate, BaseUserWithType, ChatSession, ChatMessage
 )

@@ -269,9 +240,9 @@ async def post_chat_session_message_stream(
             chat_session_data=chat_session_data,
         )
-    except Exception as e:
+    except Exception:
         logger.error(backstory_traceback.format_exc())
-        logger.error(f"āŒ Chat message streaming error")
+        logger.error("āŒ Chat message streaming error")
         return JSONResponse(
             status_code=500,
             content=create_error_response("STREAMING_ERROR", "")
         )
diff --git a/src/backend/routes/debug.py b/src/backend/routes/debug.py
index 286719c..3ae8cfd 100644
--- a/src/backend/routes/debug.py
+++ b/src/backend/routes/debug.py
@@ -37,7 +37,7 @@ async def debug_guest_session(
         user_lookup = await database.get_user_by_id(guest_id)

         # Get TTL info
-        primary_ttl = await database.redis.ttl(f"guests")
+        primary_ttl = await database.redis.ttl("guests")
         backup_ttl = await database.redis.ttl(f"guest_backup:{guest_id}")

         debug_info = {
diff --git a/src/backend/routes/jobs.py b/src/backend/routes/jobs.py
index 3d80160..4e0f9b6 100644
--- a/src/backend/routes/jobs.py
+++ b/src/backend/routes/jobs.py
@@ -44,7 +44,7 @@ async def reformat_as_markdown(database: RedisDatabase, candidate_entity: Candid
         return
     status_message = ChatMessageStatus(
         session_id=MOCK_UUID, # No session ID for document uploads
-        content=f"Reformatting job description as markdown...",
+        content="Reformatting job description as markdown...",
         activity=ApiActivityType.CONVERTING
     )
     yield status_message
@@ -73,9 +73,9 @@ Return only the markdown content, no other text. Make sure all content is includ
     chat_message : ChatMessage = message
     try:
         chat_message.content = chat_agent.extract_markdown_from_text(chat_message.content)
-    except Exception as e:
+    except Exception:
         pass

-    logger.info(f"āœ… Successfully converted content to markdown")
+    logger.info("āœ… Successfully converted content to markdown")
     yield chat_message
     return
@@ -114,7 +114,7 @@ async def create_job_from_content(database: RedisDatabase, current_user: Candida
         return
     status_message = ChatMessageStatus(
         session_id=MOCK_UUID, # No session ID for document uploads
-        content=f"Analyzing document for company and requirement details...",
+        content="Analyzing document for company and requirement details...",
         activity=ApiActivityType.SEARCHING
     )
     yield status_message
@@ -178,7 +178,7 @@ async def create_candidate_job(
     database: RedisDatabase = Depends(get_database)
 ):
     """Create a new job"""
-    is_employer = isinstance(current_user, Employer)
+    isinstance(current_user, Employer)

     try:
         job = Job.model_validate(job_data)
diff --git a/src/backend/routes/users.py b/src/backend/routes/users.py
index aca453b..6832d4c 100644
--- a/src/backend/routes/users.py
+++ b/src/backend/routes/users.py
@@ -31,7 +31,7 @@ async def get_user(
         all_candidate_data = await database.get_all_candidates()
         if not all_candidate_data:
-            logger.warning(f"āš ļø No users found in database")
+            logger.warning("āš ļø No users found in database")
             return JSONResponse(
                 status_code=404,
                 content=create_error_response("NOT_FOUND", "No users found")
             )
@@ -48,7 +48,7 @@ async def get_user(
         if not user_data:
             all_guest_data = await database.get_all_guests()
             if not all_guest_data:
-                logger.warning(f"āš ļø No guests found in database")
+                logger.warning("āš ļø No guests found in database")
                 return JSONResponse(
                     status_code=404,
                     content=create_error_response("NOT_FOUND", "No users found")
                 )
diff --git a/src/backend/tools/basetools.py b/src/backend/tools/basetools.py
index 04170b5..8727110 100644
--- a/src/backend/tools/basetools.py
+++ b/src/backend/tools/basetools.py
@@ -181,7 +181,7 @@ def get_forecast(grid_endpoint):
 def TickerValue(ticker_symbols):
     api_key = os.getenv("TWELVEDATA_API_KEY", "")
     if not api_key:
-        return {"error": f"Error fetching data: No API key for TwelveData"}
+        return {"error": "Error fetching data: No API key for TwelveData"}

     results = []
     for ticker_symbol in ticker_symbols.split(","):
@@ -512,13 +512,13 @@ class ToolEntry(BaseModel):
     tool: Tool

 def llm_tools(tools: List[ToolEntry]) -> List[Dict[str, Any]]:
-    return [entry.tool.model_dump(mode='json') for entry in tools if entry.enabled == True]
+    return [entry.tool.model_dump(mode='json') for entry in tools if entry.enabled is True]

 def all_tools() -> List[ToolEntry]:
     return [ToolEntry(tool=tool) for tool in tools]

 def enabled_tools(tools: List[ToolEntry]) -> List[ToolEntry]:
-    return [ToolEntry(tool=entry.tool) for entry in tools if entry.enabled == True]
+    return [ToolEntry(tool=entry.tool) for entry in tools if entry.enabled is True]

 tool_functions = ["DateTime", "WeatherForecast", "TickerValue", "AnalyzeSite", "GenerateImage"]
 __all__ = ["ToolEntry", "all_tools", "llm_tools", "enabled_tools", "tool_functions"]
diff --git a/src/backend/utils/helpers.py b/src/backend/utils/helpers.py
index ae573b8..028873e 100644
--- a/src/backend/utils/helpers.py
+++ b/src/backend/utils/helpers.py
@@ -14,37 +14,8 @@ import defines
 from logger import logger
 from models import DocumentType
 from models import (
-    LoginRequest, CreateCandidateRequest, CreateEmployerRequest,
-    Candidate, Employer, Guest, AuthResponse,
-    MFARequest, MFAData, MFARequestResponse, MFAVerifyRequest,
-    EmailVerificationRequest, ResendVerificationRequest,
-    # API
-    MOCK_UUID, ApiActivityType, ChatMessageError, ChatMessageResume,
-    ChatMessageSkillAssessment, ChatMessageStatus, ChatMessageStreaming,
-    ChatMessageUser, DocumentMessage, DocumentOptions, Job,
-    JobRequirements, JobRequirementsMessage, LoginRequest,
-    CreateCandidateRequest, CreateEmployerRequest,
-
-    # User models
-    Candidate, Employer, BaseUserWithType, BaseUser, Guest,
-    Authentication, AuthResponse, CandidateAI,
-
-    # Job models
-    JobApplication, ApplicationStatus,
-
-    # Chat models
-    ChatSession, ChatMessage, ChatContext, ChatQuery, ChatSenderType, ApiMessageType, ChatContextType,
-    ChatMessageRagSearch,
-
-    # Document models
-    Document, DocumentType, DocumentListResponse, DocumentUpdateRequest, DocumentContentResponse,
-
-    # Supporting models
-    Location, MFARequest, MFAData, MFARequestResponse, MFAVerifyRequest, RagContentMetadata, RagContentResponse, ResendVerificationRequest, Resume, ResumeMessage, Skill, SkillAssessment, SystemInfo, UserType, WorkExperience, Education,
-
-    # Email
-    EmailVerificationRequest,
-    ApiStatusType
+    Job,
+    ChatMessage, DocumentType, ApiStatusType
 )

 from typing import List, Dict
@@ -103,7 +74,6 @@ async def stream_agent_response(chat_agent, user_message, chat_session_data=None
     """Stream agent response with proper formatting"""
     async def message_stream_generator():
         """Generator to stream messages with persistence"""
-        last_log = None
         final_message = None

         import utils.llm_proxy as llm_manager
@@ -215,7 +185,7 @@ async def reformat_as_markdown(database, candidate_entity, content: str):

     status_message = ChatMessageStatus(
         session_id=MOCK_UUID,
-        content=f"Reformatting job description as markdown...",
+        content="Reformatting job description as markdown...",
         activity=ApiActivityType.CONVERTING
     )
     yield status_message
@@ -245,10 +215,10 @@ Return only the markdown content, no other text. Make sure all content is includ
     chat_message: ChatMessage = message
     try:
         chat_message.content = chat_agent.extract_markdown_from_text(chat_message.content)
-    except Exception as e:
+    except Exception:
         pass

-    logger.info(f"āœ… Successfully converted content to markdown")
+    logger.info("āœ… Successfully converted content to markdown")
     yield chat_message
     return
@@ -298,7 +268,7 @@ async def create_job_from_content(database, current_user, content: str):

     status_message = ChatMessageStatus(
         session_id=MOCK_UUID,
-        content=f"Analyzing document for company and requirement details...",
+        content="Analyzing document for company and requirement details...",
         activity=ApiActivityType.SEARCHING
     )
     yield status_message
diff --git a/src/backend/utils/llm_proxy.py b/src/backend/utils/llm_proxy.py
index f783b09..0e35896 100644
--- a/src/backend/utils/llm_proxy.py
+++ b/src/backend/utils/llm_proxy.py
@@ -1066,7 +1066,7 @@ async def example_usage():
         print(f"Model: {response.model}")

         if response.usage:
-            print(f"Usage Statistics:")
+            print("Usage Statistics:")
             print(f" Prompt tokens: {response.usage.prompt_tokens}")
             print(f" Completion tokens: {response.usage.completion_tokens}")
             print(f" Total tokens: {response.usage.total_tokens}")
diff --git a/src/backend/utils/rate_limiter.py b/src/backend/utils/rate_limiter.py
index 267698b..42f946b 100644
--- a/src/backend/utils/rate_limiter.py
+++ b/src/backend/utils/rate_limiter.py
@@ -325,7 +325,7 @@ def rate_limited(
     async def wrapper(*args, **kwargs):
         # Extract dependencies from function signature
         import inspect
-        sig = inspect.signature(func)
+        inspect.signature(func)

         # Get request, current_user, and rate_limiter from kwargs or args
         request = None