llm/ollama-context-proxy/test-proxy.py

#!/usr/bin/env python3
"""
Simple test script for debugging the Ollama Context Proxy
"""
import sys

import requests

def test_direct_ollama(base_url="http://localhost:11434"):
    """Test direct connection to Ollama"""
    print("=== Testing Direct Ollama Connection ===")
    print(f"URL: {base_url}")
    try:
        # /api/tags lists the models installed on the Ollama instance
        response = requests.get(f"{base_url}/api/tags", timeout=5)
        print(f"Status: {response.status_code}")
        print(f"Headers: {dict(response.headers)}")
        if response.status_code == 200:
            print("✅ Direct Ollama connection OK")
            tags_data = response.json()
            print(f"Available models: {[model['name'] for model in tags_data.get('models', [])]}")
        else:
            print(f"❌ Direct Ollama connection failed: {response.text}")
        return response.status_code == 200
    except Exception as e:
        print(f"❌ Direct Ollama connection error: {e}")
        return False

def test_proxy_health(proxy_url="http://localhost:11435"):
    """Test proxy health endpoint"""
    print("\n=== Testing Proxy Health ===")
    print(f"URL: {proxy_url}")
    try:
        response = requests.get(f"{proxy_url}/health", timeout=5)
        print(f"Status: {response.status_code}")
        print(f"Response: {response.text}")
        if response.status_code == 200:
            print("✅ Proxy health check OK")
        else:
            print("❌ Proxy health check failed")
        return response.status_code == 200
    except Exception as e:
        print(f"❌ Proxy health check error: {e}")
        return False

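# /debug/ollama is served by the proxy itself; judging by the path, it reports
# the proxy's view of the upstream Ollama instance.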
def test_proxy_debug(proxy_url="http://localhost:11435"):
    """Test proxy debug endpoint"""
    print("\n=== Testing Proxy Debug ===")
    print(f"URL: {proxy_url}/debug/ollama")
    try:
        response = requests.get(f"{proxy_url}/debug/ollama", timeout=10)
        print(f"Status: {response.status_code}")
        print(f"Response: {response.text}")
        if response.status_code == 200:
            print("✅ Proxy debug check OK")
        else:
            print("❌ Proxy debug check failed")
        return response.status_code == 200
    except Exception as e:
        print(f"❌ Proxy debug check error: {e}")
        return False

def test_proxy_request(proxy_url="http://localhost:11435", model="llama2"):
    """Test an actual generate request through the proxy"""
    print("\n=== Testing Proxy Request ===")
    print(f"URL: {proxy_url}/proxy-context/auto/api/generate")
    payload = {
        "model": model,
        "prompt": "Hello, world!",
        # stream=False asks Ollama for one complete JSON response
        # instead of newline-delimited chunks
        "stream": False
    }
    try:
        response = requests.post(
            f"{proxy_url}/proxy-context/auto/api/generate",
            json=payload,
            timeout=30
        )
        print(f"Status: {response.status_code}")
        print(f"Headers: {dict(response.headers)}")
        print(f"Response preview: {response.text[:500]}...")
        if response.status_code == 200:
            print("✅ Proxy request OK")
        else:
            print("❌ Proxy request failed")
        return response.status_code == 200
    except Exception as e:
        print(f"❌ Proxy request error: {e}")
        return False

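# The test above sets "stream": False. A minimal sketch of a streaming variant
# follows, assuming the proxy passes Ollama's newline-delimited JSON chunks
# through unchanged. This helper is illustrative only and is not run by main().
def test_proxy_request_streaming(proxy_url="http://localhost:11435", model="llama2"):
    """Sketch: exercise the same endpoint with streaming enabled"""
    import json

    payload = {"model": model, "prompt": "Hello, world!", "stream": True}
    try:
        response = requests.post(
            f"{proxy_url}/proxy-context/auto/api/generate",
            json=payload,
            timeout=30,
            stream=True,
        )
        # Each non-empty line should be one JSON chunk from Ollama
        for line in response.iter_lines():
            if not line:
                continue
            chunk = json.loads(line)
            print(chunk.get("response", ""), end="", flush=True)
            if chunk.get("done"):
                break
        print()
        return response.status_code == 200
    except Exception as e:
        print(f"❌ Streaming request error: {e}")
        return False
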
def main():
    if len(sys.argv) > 1 and sys.argv[1] == "--help":
        print("Usage: python3 test-proxy.py [ollama_url] [proxy_url] [model]")
        print("  ollama_url: Default http://localhost:11434")
        print("  proxy_url:  Default http://localhost:11435")
        print("  model:      Default llama2")
        return

    ollama_url = sys.argv[1] if len(sys.argv) > 1 else "http://localhost:11434"
    proxy_url = sys.argv[2] if len(sys.argv) > 2 else "http://localhost:11435"
    model = sys.argv[3] if len(sys.argv) > 3 else "llama2"

    print("Ollama Context Proxy Debug Test")
    print("===============================")
    print(f"Ollama URL: {ollama_url}")
    print(f"Proxy URL: {proxy_url}")
    print(f"Test Model: {model}")

    # Run tests
    tests = [
        ("Direct Ollama", lambda: test_direct_ollama(ollama_url)),
        ("Proxy Health", lambda: test_proxy_health(proxy_url)),
        ("Proxy Debug", lambda: test_proxy_debug(proxy_url)),
        ("Proxy Request", lambda: test_proxy_request(proxy_url, model)),
    ]

    results = []
    for test_name, test_func in tests:
        success = test_func()
        results.append((test_name, success))

    print("\n=== Summary ===")
    for test_name, success in results:
        status = "✅ PASS" if success else "❌ FAIL"
        print(f"{test_name}: {status}")

    all_passed = all(success for _, success in results)
    if all_passed:
        print("\n🎉 All tests passed!")
    else:
        print("\n⚠️ Some tests failed. Check the output above for details.")
        sys.exit(1)


if __name__ == "__main__":
    main()