from __future__ import annotations
|
|
from typing import Literal, AsyncGenerator, ClassVar, Optional, Any
|
|
|
|
from pydantic import Field
|
|
|
|
from database.core import RedisDatabase
|
|
|
|
from .base import Agent, agent_registry
|
|
from logger import logger
|
|
|
|
from models import ApiMessage, Tunables, ApiStatusType, LLMMessage
|
|
|
|
|
|
system_message = """
|
|
When answering queries, follow these steps:
|
|
|
|
- When any content from <|context|> is relevant, synthesize information from all sources to provide the most complete answer.
|
|
- Always prioritize the most up-to-date, recent, and relevant information first.
|
|
- If there is information in the <|context|> section to enhance the answer, incorporate it seamlessly and refer to it as 'the latest information' or 'recent data' instead of mentioning '<|context|>' (etc.) or quoting it directly.
|
|
- Avoid phrases like 'According to the <|context|>' or similar references to the <|context|>.
|
|
|
|
Always use <|context|> when possible. Be concise, and never make up information. If you do not know the answer, say so.
|
|
|
|
Before answering, ensure you have spelled the candidate's name correctly.
|
|
"""
|
|
|
|
|
|
class CandidateChat(Agent):
    """
    Chat agent that represents a single candidate to employers and visitors.

    Builds a per-user system prompt that pins the candidate's exact name
    spelling and embeds the shared ``system_message`` context rules, then
    delegates streaming generation to the base ``Agent``.
    """

    agent_type: Literal["candidate_chat"] = "candidate_chat"  # type: ignore
    _agent_type: ClassVar[str] = agent_type  # Used for registry registration

    # Default prompt; replaced with a user-specific prompt in generate().
    system_prompt: str = system_message
    # Per-session message history, keyed by session_id.
    sessions: dict[str, list[LLMMessage]] = Field(default_factory=dict)

    async def generate(
        self,
        llm: Any,
        model: str,
        session_id: str,
        prompt: str,
        database: RedisDatabase,
        tunables: Optional[Tunables] = None,
        temperature: float = 0.7,
    ) -> AsyncGenerator[ApiMessage, None]:
        """
        Stream chat responses about the candidate for one user prompt.

        Yields ``ApiMessage`` items from the base agent; stops immediately
        after forwarding the first message whose status is ERROR.

        Raises:
            ValueError: if ``self.user`` has not been set on the agent.
        """
        user = self.user
        if not user:
            logger.error("User is not set for CandidateChat agent.")
            raise ValueError("User must be set before generating candidate chat responses.")

        # Personalize the system prompt with the candidate's exact name so
        # the model never adopts a misspelling found in retrieved context.
        # (Fixed grammar: "representing a X's work history" -> "representing
        # X's work history".)
        self.system_prompt = f"""
You are a helpful expert system representing {user.first_name}'s work history to potential employers and users curious about the candidate. You want to incorporate as many facts and details about {user.first_name} as possible.

When referencing the candidate, ALWAYS ensure correct spelling.

The candidate's first name is: "{user.first_name}"
The candidate's last name is: "{user.last_name}"

Use that spelling instead of any spelling you may find in the <|context|>.

{system_message}
"""
        # NOTE(review): only the first prompt of a session is recorded here;
        # follow-up prompts are never appended to self.sessions. Confirm
        # whether the base Agent maintains session history itself.
        if session_id not in self.sessions:
            self.sessions[session_id] = [LLMMessage(role="user", content=prompt)]

        async for message in super().generate(
            llm=llm,
            model=model,
            session_id=session_id,
            prompt=prompt,
            database=database,
            temperature=temperature,
            tunables=tunables,
        ):
            if message.status == ApiStatusType.ERROR:
                # Forward the error to the caller, then stop streaming.
                yield message
                return
            yield message
|
|
|
|
|
|
# Register CandidateChat in the global agent registry under "candidate_chat"
# so it can be looked up by its agent_type string.
agent_registry.register(CandidateChat._agent_type, CandidateChat)
|