# backstory/src/utils/message.py
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Field, model_validator
class Message(BaseModel):
    """A single chat exchange: the user's prompt plus everything generated
    while the backend processes it (the prompts sent to the LLM, the
    response, and streaming/telemetry bookkeeping).
    """

    # --- Required ---
    prompt: str  # Query to be answered

    # --- Tunables ---
    enable_rag: bool = True    # Augment the prompt with retrieved context
    enable_tools: bool = True  # Allow the LLM to invoke tools

    # --- Generated while processing message ---
    status: str = ""          # Status of the message
    # default_factory gives each instance its own fresh container instead of
    # a value computed once at class-definition time.
    preamble: dict[str, str] = Field(default_factory=dict)  # Preamble to be prepended to the prompt
    system_prompt: str = ""   # System prompt provided to the LLM
    context_prompt: str = ""  # Full content of the message (preamble + prompt)
    response: str = ""        # LLM response to the preamble + query
    # BUG FIX: the original default stored the *typing object*
    # `List[dict[str, Any]]` under "rag" rather than an actual value; an
    # empty list is the intended empty state.
    metadata: dict[str, Any] = Field(
        default_factory=lambda: {
            "rag": [],  # RAG results attached while processing
            "eval_count": 0,
            "eval_duration": 0,
            "prompt_eval_count": 0,
            "prompt_eval_duration": 0,
            "context_size": 0,
        }
    )
    network_packets: int = 0  # Total number of streaming packets
    network_bytes: int = 0    # Total bytes sent while streaming packets
    actions: List[str] = Field(default_factory=list)  # Other session modifying actions performed while processing the message
    # BUG FIX: the original default called datetime.now() once at import
    # time, so every Message shared the same stale timestamp. The factory
    # evaluates per-instance, keeping the value timezone-aware (UTC).
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))

    def add_action(self, action: str | list[str]) -> None:
        """Append one action (str) or several (list of str) to ``actions``."""
        if isinstance(action, str):
            self.actions.append(action)
        else:
            self.actions.extend(action)

    def get_summary(self) -> str:
        """Return a human-readable summary of the message.

        Returns:
            The timestamp and query, followed by the response and any
            actions taken — or "No response yet" if processing has not
            produced a response.
        """
        response_summary = (
            f"Response: {self.response} (Actions: {', '.join(self.actions)})"
            if self.response else "No response yet"
        )
        # BUG FIX: the original read self.content, a field that does not
        # exist (AttributeError at runtime); the query text lives in
        # self.prompt.
        return (
            f"Message at {self.timestamp}:\n"
            f"Query: {self.prompt}\n"
            f"{response_summary}"
        )