"""
|
|
Simple test to verify Step 5B enhanced bot functionality.
|
|
|
|
This test verifies that the enhanced bot components work correctly
|
|
when integrated with the existing voicebot system.
|
|
"""
|
|
|
|
import asyncio
|
|
import os
|
|
import time
|
|
|
|
# Set up test environment variables
|
|
os.environ["AI_CHATBOT_PERSONALITY"] = "helpful_assistant"
|
|
os.environ["AI_CHATBOT_PROVIDER"] = "local" # Use local provider for testing
|
|
os.environ["AI_CHATBOT_STREAMING"] = "false"
|
|
os.environ["AI_CHATBOT_MEMORY"] = "true"
|
|
|
|
# Import test modules
|
|
import sys
|
|
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|
from shared.models import ChatMessageModel
|
|
|
|
|
|


async def test_enhanced_ai_chatbot():
    """Test the enhanced AI chatbot functionality."""
    print("Testing Enhanced AI Chatbot...")

    try:
        # Import the enhanced bot
        from voicebot.bots.ai_chatbot import handle_chat_message, get_bot_status

        # Create a mock send function
        responses = []

        async def mock_send(message: str):
            responses.append(message)
            print(f"Bot Response: {message}")

        # Test message handling
        test_message = ChatMessageModel(
            id="test_message_id",
            sender_name="test_user",
            sender_session_id="test_session",
            lobby_id="test_lobby",
            message="Hello, can you help me?",
            timestamp=time.time()
        )

        print(f"Sending test message: {test_message.message}")
        response = await handle_chat_message(test_message, mock_send)

        if response:
            print(f"✓ Bot responded successfully: {response[:50]}...")
        else:
            print("✗ Bot did not respond")

        # Test bot status
        print("\nTesting bot status...")
        status = await get_bot_status()
        print("✓ Bot status retrieved:")
        print(f" - Agent: {status.get('agent_name', 'unknown')}")
        print(f" - Features Available: {status.get('features_available', False)}")
        print(f" - Configuration: {status.get('configuration', {})}")

        return True

    except Exception as e:
        print(f"✗ Enhanced bot test failed: {e}")
        return False


async def test_personality_system():
    """Test the personality system components."""
    print("\nTesting Personality System...")

    try:
        from voicebot.personality_system import personality_manager

        # Test listing templates
        templates = personality_manager.list_templates()
        print(f"✓ Found {len(templates)} personality templates:")
        for template in templates:
            print(f" - {template.id}: {template.description}")

        # Test creating personality from template
        personality = personality_manager.create_personality_from_template("helpful_assistant")
        if personality:
            print(f"✓ Created personality: {personality.name}")
            print(f" - Traits: {[trait.value for trait in personality.traits]}")
            print(f" - Communication Style: {personality.communication_style.value}")
        else:
            print("✗ Failed to create personality")

        return True

    except Exception as e:
        print(f"✗ Personality system test failed: {e}")
        return False


async def test_conversation_context():
    """Test the conversation context management."""
    print("\nTesting Conversation Context...")

    try:
        from voicebot.conversation_context import context_manager

        # Test creating context
        context = context_manager.get_or_create_context(
            session_id="test_session",
            bot_name="test_bot",
            conversation_id="test_conversation"
        )

        if context:
            print(f"✓ Created conversation context: {context.conversation_id}")

            # Test adding conversation turn
            context_manager.add_conversation_turn(
                conversation_id=context.conversation_id,
                user_message="Test message",
                bot_response="Test response",
                context_used={"test": "context"},
                metadata={"timestamp": time.time()}
            )

            print("✓ Added conversation turn")
            print(f" - Turns in context: {len(context.turns)}")

            # Test context summary
            summary = context_manager.get_context_for_response(context.conversation_id)
            if summary:
                print(f"✓ Generated context summary: {summary[:50]}...")

        return True

    except Exception as e:
        print(f"✗ Conversation context test failed: {e}")
        return False


async def test_integration_orchestrator():
    """Test the integration orchestrator."""
    print("\nTesting Integration Orchestrator...")

    try:
        from step_5b_integration_demo import enhanced_orchestrator

        # Test bot discovery
        enhanced_bots = await enhanced_orchestrator.discover_enhanced_bots()
        print(f"✓ Discovered {len(enhanced_bots)} bots")

        # Find enhanced bots
        enhanced_count = sum(1 for bot_info in enhanced_bots.values()
                             if bot_info.get('enhanced_features', False))
        print(f"✓ Found {enhanced_count} enhanced bots")

        # Test analytics
        analytics = enhanced_orchestrator.get_bot_analytics()
        print(f"✓ Analytics: {analytics['enhanced_bots_count']} enhanced bots configured")

        return True

    except Exception as e:
        print(f"✗ Integration orchestrator test failed: {e}")
        return False


async def run_all_tests():
    """Run all Step 5B tests."""
    print("=== Step 5B Enhanced Bot Management Tests ===\n")

    test_results = []

    # Run individual tests
    test_results.append(await test_enhanced_ai_chatbot())
    test_results.append(await test_personality_system())
    test_results.append(await test_conversation_context())
    test_results.append(await test_integration_orchestrator())

    # Summary
    passed = sum(test_results)
    total = len(test_results)

    print(f"\n=== Test Results: {passed}/{total} tests passed ===")

    if passed == total:
        print("🎉 All Step 5B components are working correctly!")
    else:
        print("⚠️ Some tests failed - check the output above for details")

    return passed == total


if __name__ == "__main__":
    asyncio.run(run_all_tests())