diff --git a/src/backend/agents/base.py b/src/backend/agents/base.py
index a9c99f2..e3aa1d4 100644
--- a/src/backend/agents/base.py
+++ b/src/backend/agents/base.py
@@ -358,8 +358,9 @@ class Agent(BaseModel, ABC):
                 size=user.file_watcher.collection.count()
             )
+            entries += len(rag_metadata.documents)
             rag_message.metadata.rag_results.append(rag_metadata)
-            rag_message.content = f"Results from {rag.name} RAG: {len(chroma_results['documents'])} results."
+            rag_message.content = f"Results from {rag.name} RAG: {len(rag_metadata.documents)} results."
             yield rag_message
 
         rag_message.content = (
@@ -377,7 +378,7 @@ class Agent(BaseModel, ABC):
         return
 
     async def generate(
-        self, llm: Any, model: str, user_message: ChatMessageUser, user_id: str, temperature=0.7
+        self, llm: Any, model: str, user_message: ChatMessageUser, user: Candidate, temperature=0.7
     ) -> AsyncGenerator[ChatMessage | ChatMessageBase, None]:
         logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
diff --git a/src/backend/agents/candidate_chat.py b/src/backend/agents/candidate_chat.py
index 072a8f5..c165fe8 100644
--- a/src/backend/agents/candidate_chat.py
+++ b/src/backend/agents/candidate_chat.py
@@ -7,11 +7,10 @@
 from .base import Agent, agent_registry
 from logger import logger
 from .registry import agent_registry
-from models import ( ChatQuery, ChatMessage, Tunables, ChatStatusType)
+from models import ( ChatQuery, ChatMessage, Tunables, ChatStatusType, ChatMessageUser, Candidate)
+
 system_message = f"""
-Launched on {datetime.now().isoformat()}.
-
 When answering queries, follow these steps:
 - First analyze the query to determine if real-time information from the tools might be helpful
@@ -57,31 +56,18 @@ class CandidateChat(Agent):
     system_prompt: str = system_message
 
-#     async def prepare_message(self, message: Message) -> AsyncGenerator[Message, None]:
-#         logger.info(f"{self.agent_type} - {inspect.stack()[0].function}")
-#         if not self.context:
-#             raise ValueError("Context is not set for this agent.")
-
-#         async for message in super().prepare_message(message):
-#             if message.status != "done":
-#                 yield message
-
-#         if message.preamble:
-#             excluded = {}
-#             preamble_types = [
-#                 f"<|{p}|>" for p in message.preamble.keys() if p not in excluded
-#             ]
-#             preamble_types_AND = " and ".join(preamble_types)
-#             preamble_types_OR = " or ".join(preamble_types)
-#             message.preamble[
-#                 "rules"
-#             ] = f"""\
-#     - Answer the question based on the information provided in the {preamble_types_AND} sections by incorporate it seamlessly and refer to it using natural language instead of mentioning {preamble_types_OR} or quoting it directly.
-#     - If there is no information in these sections, answer based on your knowledge, or use any available tools.
-#     - Avoid phrases like 'According to the {preamble_types[0]}' or similar references to the {preamble_types_OR}.
-#     """
-#             message.preamble["question"] = "Respond to:"
+    async def generate(
+        self, llm: Any, model: str, user_message: ChatMessageUser, user: Candidate, temperature=0.7
+    ):
+        self.system_prompt = f"""
+You are a helpful assistant designed to answer questions about {user.full_name}, their resumes, and related topics. You can also generate images based on user requests.
+{system_message}
+"""
+        async for message in super().generate(llm, model, user_message, user, temperature):
+            yield message
+
 
 # Register the base agent
 agent_registry.register(CandidateChat._agent_type, CandidateChat)
+
diff --git a/src/backend/main.py b/src/backend/main.py
index 5439c4a..30c7d3a 100644
--- a/src/backend/main.py
+++ b/src/backend/main.py
@@ -1994,7 +1994,7 @@ async def post_chat_session_message_stream(
             llm=llm_manager.get_llm(),
             model=defines.model,
             user_message=user_message,
-            user_id=current_user.id,
+            user=current_user,
         ):
             # Store reference to the complete AI message
             if generated_message.status == ChatStatusType.DONE:
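
Note on the signature change: generate now receives the full Candidate object rather than a bare user_id because the CandidateChat override builds a per-candidate system prompt from user.full_name. A minimal, self-contained sketch of that flow follows; it is not part of the patch, and StubCandidate, StubUserMessage, and the stripped-down generate are hypothetical stand-ins for the project's real models and agent.

import asyncio

# Hypothetical stand-ins for the project's Candidate and ChatMessageUser models.
class StubCandidate:
    def __init__(self, full_name: str):
        self.full_name = full_name

class StubUserMessage:
    def __init__(self, content: str):
        self.content = content

system_message = "When answering queries, analyze the query first."

async def generate(llm, model, user_message, user, temperature=0.7):
    # Mirrors the CandidateChat override: the prompt is interpolated from
    # user.full_name, which is why a Candidate (not just its id) is threaded
    # through from the endpoint in main.py.
    system_prompt = f"""
You are a helpful assistant designed to answer questions about {user.full_name}, their resumes, and related topics.
{system_message}
"""
    yield system_prompt.strip()

async def main():
    user = StubCandidate(full_name="Ada Lovelace")
    async for chunk in generate(None, "stub-model", StubUserMessage("Hi"), user):
        print(chunk)

asyncio.run(main())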