# ai-voicebot/voicebot/bots/ai_chatbot.py
"""Enhanced AI Chatbot with Multi-Provider Support and Personality System.

This bot demonstrates advanced capabilities, including:

- Multi-provider AI integration (OpenAI, Anthropic, local models)
- Personality system with configurable traits
- Conversation context and memory management
- Enhanced response generation with streaming support
"""

import os
import time
import uuid
from typing import Dict, Optional, Callable, Awaitable, Any
from aiortc import MediaStreamTrack
# Import system modules
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from logger import logger
from shared.models import ChatMessageModel
# Import advanced bot management modules
try:
    from voicebot.ai_providers import (
        AIProviderType, AIProviderConfig, ai_provider_manager,
        ConversationContext, MessageRole
    )
    from voicebot.personality_system import personality_manager, PersonalityTrait, CommunicationStyle
    from voicebot.conversation_context import context_manager

    AI_PROVIDERS_AVAILABLE = True
except ImportError as e:
    logger.warning(f"Advanced AI features not available: {e}")
    AI_PROVIDERS_AVAILABLE = False

AGENT_NAME = "ai_chatbot"
AGENT_DESCRIPTION = "Advanced AI chatbot with multi-provider support, personality system, and conversation memory"
# Bot configuration from environment
BOT_PERSONALITY = os.getenv("AI_CHATBOT_PERSONALITY", "helpful_assistant")
BOT_AI_PROVIDER = os.getenv("AI_CHATBOT_PROVIDER", "openai")
BOT_STREAMING = os.getenv("AI_CHATBOT_STREAMING", "false").lower() == "true"
BOT_MEMORY_ENABLED = os.getenv("AI_CHATBOT_MEMORY", "true").lower() == "true"
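
# Example environment configuration (illustrative sketch; the values shown are
# the defaults read above and can be overridden per deployment):
#   export AI_CHATBOT_PERSONALITY=helpful_assistant
#   export AI_CHATBOT_PROVIDER=openai
#   export AI_CHATBOT_STREAMING=false
#   export AI_CHATBOT_MEMORY=true
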
# Fallback responses when AI providers are not available
FALLBACK_RESPONSES = {
    "greeting": [
        "Hello! I'm an AI chatbot ready to help you.",
        "Hi there! How can I assist you today?",
        "Greetings! I'm here to chat and help with any questions you have."
    ],
    "help": [
        "I'm an advanced AI chatbot that can help with various topics. Just ask me anything!",
        "I can assist with questions, have conversations, and provide information on many subjects.",
        "Feel free to ask me questions or just chat - I'm here to help!"
    ],
    "capabilities": [
        "I support multiple AI providers, have configurable personalities, and maintain conversation context.",
        "My capabilities include natural conversation, information retrieval, and adaptive personality responses.",
        "I can remember our conversation context and adapt my responses based on configured personality traits."
    ],
    "default": [
        "That's interesting! Tell me more about that.",
        "I understand. What would you like to discuss next?",
        "Thanks for sharing! How can I help you further?",
        "I see. Is there anything specific you'd like to know about?"
    ],
    "error": [
        "I apologize, but I'm having trouble processing that right now. Could you try rephrasing?",
        "Something went wrong on my end. Could you ask that again?",
        "I encountered an issue. Please try your question again."
    ]
}


class EnhancedAIChatbot:
    """Enhanced AI chatbot with advanced features."""

    def __init__(self, session_name: str):
        self.session_name = session_name
        self.session_id = str(uuid.uuid4())
        self.ai_provider = None
        self.personality = None
        self.conversation_context = None
        self.initialized = False

        # Initialize advanced features if available
        if AI_PROVIDERS_AVAILABLE:
            self._initialize_ai_features()
        else:
            logger.warning("Running in fallback mode - advanced AI features disabled")

    def _initialize_ai_features(self):
        """Initialize AI provider, personality, and context management."""
        try:
            # Initialize personality
            self.personality = personality_manager.create_personality_from_template(BOT_PERSONALITY)
            if not self.personality:
                logger.warning(f"Personality template '{BOT_PERSONALITY}' not found, using default")
                self.personality = personality_manager.create_personality_from_template("helpful_assistant")

            # Initialize AI provider
            provider_type = AIProviderType(BOT_AI_PROVIDER)
            self.ai_provider = ai_provider_manager.create_provider(provider_type)
            ai_provider_manager.register_provider(f"{AGENT_NAME}_{self.session_id}", self.ai_provider)

            # Initialize conversation context if memory is enabled
            if BOT_MEMORY_ENABLED:
                self.conversation_context = context_manager.get_or_create_context(
                    session_id=self.session_id,
                    bot_name=AGENT_NAME,
                    conversation_id=f"{AGENT_NAME}_{self.session_id}_{int(time.time())}"
                )

            self.initialized = True
            logger.info(
                f"Enhanced AI chatbot initialized: provider={BOT_AI_PROVIDER}, "
                f"personality={BOT_PERSONALITY}, memory={BOT_MEMORY_ENABLED}"
            )
        except Exception as e:
            logger.error(f"Failed to initialize AI features: {e}")
            self.initialized = False

    async def generate_response(self, message: str) -> str:
        """Generate a response using AI provider with personality and context."""
        if not self.initialized or not self.ai_provider:
            return self._get_fallback_response(message)

        try:
            # Prepare conversation context
            if self.conversation_context:
                # Create a new AI conversation context with personality
                ai_context = ConversationContext(
                    session_id=self.session_id,
                    bot_name=AGENT_NAME,
                    personality_prompt=self.personality.generate_system_prompt() if self.personality else None
                )

                # Add personality system message
                if self.personality:
                    ai_context.add_message(MessageRole.SYSTEM, self.personality.generate_system_prompt())

                # Add conversation history context
                context_summary = context_manager.get_context_for_response(self.conversation_context.conversation_id)
                if context_summary:
                    ai_context.add_message(MessageRole.SYSTEM, f"Conversation context: {context_summary}")
            else:
                # Simple context without memory
                ai_context = ConversationContext(
                    session_id=self.session_id,
                    bot_name=AGENT_NAME
                )
                if self.personality:
                    ai_context.add_message(MessageRole.SYSTEM, self.personality.generate_system_prompt())

            # Generate response
            if BOT_STREAMING:
                # For streaming, collect the full response
                response_parts = []
                async for chunk in self.ai_provider.stream_response(ai_context, message):
                    response_parts.append(chunk)
                response = "".join(response_parts)
            else:
                response = await self.ai_provider.generate_response(ai_context, message)

            # Store conversation turn in context manager
            if self.conversation_context:
                context_manager.add_conversation_turn(
                    conversation_id=self.conversation_context.conversation_id,
                    user_message=message,
                    bot_response=response,
                    context_used={"ai_provider": BOT_AI_PROVIDER, "personality": BOT_PERSONALITY},
                    metadata={"timestamp": time.time(), "streaming": BOT_STREAMING}
                )

            return response
        except Exception as e:
            logger.error(f"AI response generation failed: {e}")
            return self._get_fallback_response(message, error=True)

    def _get_fallback_response(self, message: str, error: bool = False) -> str:
        """Get fallback response when AI providers are unavailable."""
        if error:
            return FALLBACK_RESPONSES["error"][hash(message) % len(FALLBACK_RESPONSES["error"])]

        message_lower = message.lower()
        # Simple keyword matching for fallback responses
        if any(word in message_lower for word in ["hello", "hi", "hey", "greetings"]):
            return FALLBACK_RESPONSES["greeting"][hash(message) % len(FALLBACK_RESPONSES["greeting"])]
        elif any(word in message_lower for word in ["help", "what can you do", "capabilities"]):
            return FALLBACK_RESPONSES["help"][hash(message) % len(FALLBACK_RESPONSES["help"])]
        elif any(word in message_lower for word in ["features", "abilities", "advanced"]):
            return FALLBACK_RESPONSES["capabilities"][hash(message) % len(FALLBACK_RESPONSES["capabilities"])]
        else:
            return FALLBACK_RESPONSES["default"][hash(message) % len(FALLBACK_RESPONSES["default"])]

    async def health_check(self) -> Dict[str, Any]:
        """Perform health check on bot components."""
        health = {
            "bot_name": AGENT_NAME,
            "session_id": self.session_id,
            "initialized": self.initialized,
            "ai_providers_available": AI_PROVIDERS_AVAILABLE,
            "configuration": {
                "personality": BOT_PERSONALITY,
                "ai_provider": BOT_AI_PROVIDER,
                "streaming": BOT_STREAMING,
                "memory_enabled": BOT_MEMORY_ENABLED
            }
        }

        if self.initialized and self.ai_provider:
            try:
                provider_healthy = await self.ai_provider.health_check()
                health["ai_provider_status"] = "healthy" if provider_healthy else "unhealthy"
            except Exception as e:
                health["ai_provider_status"] = f"error: {e}"

        if self.personality:
            health["personality_loaded"] = True
            health["personality_traits"] = [trait.value for trait in self.personality.traits]

        if self.conversation_context:
            health["conversation_turns"] = len(self.conversation_context.turns)
            health["context_summary"] = self.conversation_context.get_conversation_summary()

        return health


# Global bot instance
_bot_instance: Optional[EnhancedAIChatbot] = None


def agent_info() -> Dict[str, Any]:
    """Return agent information."""
    return {
        "name": AGENT_NAME,
        "description": AGENT_DESCRIPTION,
        "has_media": "false",
        "features": [
            "multi_provider_ai",
            "personality_system",
            "conversation_memory",
            "streaming_responses",
            "health_monitoring"
        ]
    }


def create_agent_tracks(session_name: str) -> Dict[str, MediaStreamTrack]:
    """AI chatbot doesn't provide media tracks - it's chat-only."""
    return {}


async def handle_chat_message(
    chat_message: ChatMessageModel,
    send_message_func: Callable[[str], Awaitable[None]]
) -> Optional[str]:
    """Handle incoming chat messages and provide AI-powered responses."""
    global _bot_instance

    try:
        # Initialize bot instance if needed
        if _bot_instance is None:
            _bot_instance = EnhancedAIChatbot(chat_message.nick)
            logger.info(f"Initialized enhanced AI chatbot for session: {chat_message.nick}")

        # Generate response
        response = await _bot_instance.generate_response(chat_message.message)

        # Send response
        if response:
            await send_message_func(response)
            logger.info(f"AI Chatbot responded to {chat_message.nick}: {response[:100]}...")

        return response
    except Exception as e:
        logger.error(f"Error in AI chatbot: {e}")
        error_response = "I apologize, but I encountered an error. Please try again."
        await send_message_func(error_response)
        return error_response


async def get_bot_status() -> Dict[str, Any]:
    """Get detailed bot status and health information."""
    global _bot_instance

    status = {
        "agent_name": AGENT_NAME,
        "agent_description": AGENT_DESCRIPTION,
        "features_available": AI_PROVIDERS_AVAILABLE,
        "configuration": {
            "personality_template": BOT_PERSONALITY,
            "ai_provider": BOT_AI_PROVIDER,
            "streaming_enabled": BOT_STREAMING,
            "memory_enabled": BOT_MEMORY_ENABLED
        }
    }

    if _bot_instance:
        health_info = await _bot_instance.health_check()
        status.update(health_info)
    else:
        status["instance_status"] = "not_initialized"

    # Add system-level information
    if AI_PROVIDERS_AVAILABLE:
        status["available_personalities"] = [
            template.id for template in personality_manager.list_templates()
        ]
        status["available_providers"] = ai_provider_manager.list_providers()

        # Get context manager statistics
        if BOT_MEMORY_ENABLED:
            context_stats = context_manager.get_statistics()
            status["conversation_statistics"] = context_stats

    return status


# Additional helper functions for advanced features

async def switch_personality(personality_id: str) -> bool:
    """Switch bot personality at runtime."""
    global _bot_instance

    if not AI_PROVIDERS_AVAILABLE or not _bot_instance:
        return False

    try:
        new_personality = personality_manager.create_personality_from_template(personality_id)
        if new_personality:
            _bot_instance.personality = new_personality
            logger.info(f"Switched to personality: {personality_id}")
            return True
    except Exception as e:
        logger.error(f"Failed to switch personality: {e}")

    return False


async def switch_ai_provider(provider_type: str) -> bool:
    """Switch AI provider at runtime."""
    global _bot_instance

    if not AI_PROVIDERS_AVAILABLE or not _bot_instance:
        return False

    try:
        provider_enum = AIProviderType(provider_type)
        new_provider = ai_provider_manager.create_provider(provider_enum)
        _bot_instance.ai_provider = new_provider
        logger.info(f"Switched to AI provider: {provider_type}")
        return True
    except Exception as e:
        logger.error(f"Failed to switch AI provider: {e}")
        return False
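

# ---------------------------------------------------------------------------
# Minimal local usage sketch (not part of the bot API). It assumes that
# ChatMessageModel can be constructed with `nick` and `message` fields, which
# may differ from the real definition in shared.models; adjust as needed.
# It feeds one message through handle_chat_message and prints the reply,
# falling back to the keyword-based responses when no AI provider is set up.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        async def _print_reply(text: str) -> None:
            # Stand-in for the send_message_func normally supplied by the voicebot runtime.
            print(f"bot> {text}")

        # Hypothetical construction; the actual ChatMessageModel fields may vary.
        demo_message = ChatMessageModel(nick="demo_user", message="Hello, what can you do?")
        await handle_chat_message(demo_message, _print_reply)

        status = await get_bot_status()
        print(f"status> initialized={status.get('initialized', False)}")

    asyncio.run(_demo())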