# Source: Research_AI_Assistant / test_setup.py
# (Hugging Face Space, commit 66dbebd "Initial commit V1", 4.35 kB — page chrome removed)
# test_setup.py
"""
Test script to verify installation and basic functionality
"""
def test_imports():
    """Check that every critical third-party dependency can be imported.

    Each module is attempted independently (instead of one try/except around
    all imports) so a single missing package does not hide later failures.
    Fixes mojibake check marks ("βœ“"/"βœ—" -> "✓"/"✗") in the output.

    Returns:
        bool: True if every import succeeded, False otherwise.
    """
    import importlib

    print("Testing imports...")
    # faiss builds do not always expose __version__, so report it without one.
    modules = ["gradio", "transformers", "torch", "faiss", "numpy", "pandas"]
    ok = True
    for name in modules:
        try:
            mod = importlib.import_module(name)
            version = getattr(mod, "__version__", None)
            if version is not None:
                print(f"✓ {name} version: {version}")
            else:
                print(f"✓ {name} imported successfully")
        except ImportError as e:
            print(f"✗ Import failed: {e}")
            ok = False
    if ok:
        print("\n✓ All imports successful!")
    return ok
def test_embedding_model():
    """Verify the sentence-transformers embedding model loads and encodes.

    Loads the MiniLM model and smoke-tests encoding a single sentence.
    Fixes mojibake check marks ("βœ“"/"βœ—" -> "✓"/"✗") in the output.

    Returns:
        bool: True if the model loaded and produced an embedding, False otherwise.
    """
    print("\nTesting embedding model...")
    try:
        from sentence_transformers import SentenceTransformer

        model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
        print("✓ Embedding model loaded successfully")
        # Smoke-test embedding generation on one short sentence.
        test_text = "This is a test sentence."
        embedding = model.encode(test_text)
        print(f"✓ Embedding generated: shape {embedding.shape}")
        return True
    except Exception as e:
        # Broad catch is deliberate: any load/download/encode failure means
        # the environment is not set up, which is what this script reports.
        print(f"✗ Embedding model test failed: {e}")
        return False
def test_llm_router():
    """Verify the project's LLMRouter can be imported and constructed.

    Reads the HF_TOKEN environment variable (empty string if unset) and
    passes it to the router constructor.
    Fixes mojibake check marks ("βœ“"/"βœ—" -> "✓"/"✗") in the output.

    Returns:
        bool: True if LLMRouter initialized, False otherwise.
    """
    print("\nTesting LLM Router...")
    try:
        import os

        from llm_router import LLMRouter

        hf_token = os.getenv("HF_TOKEN", "")
        router = LLMRouter(hf_token)
        print("✓ LLM Router initialized successfully")
        return True
    except Exception as e:
        print(f"✗ LLM Router test failed: {e}")
        return False
def test_context_manager():
    """Verify the project's EfficientContextManager can be constructed.

    Fixes mojibake check marks ("βœ“"/"βœ—" -> "✓"/"✗") in the output.

    Returns:
        bool: True if the context manager initialized, False otherwise.
    """
    print("\nTesting Context Manager...")
    try:
        from context_manager import EfficientContextManager

        cm = EfficientContextManager()
        print("✓ Context Manager initialized successfully")
        return True
    except Exception as e:
        print(f"✗ Context Manager test failed: {e}")
        return False
def test_cache():
    """Verify the project's SessionCache supports a set/get round trip.

    Stores one entry with a one-hour TTL and checks it can be read back.
    Fixes mojibake check marks ("βœ“"/"βœ—" -> "✓"/"✗") in the output.

    Returns:
        bool: True if the round trip returned a value, False otherwise.
    """
    print("\nTesting Cache...")
    try:
        from cache_implementation import SessionCache

        cache = SessionCache()
        # Round-trip a small payload to prove both set() and get() work.
        cache.set("test_session", {"data": "test"}, ttl=3600)
        result = cache.get("test_session")
        if result is not None:
            print("✓ Cache operations working correctly")
            return True
        print("✗ Cache retrieval failed")
        return False
    except Exception as e:
        print(f"✗ Cache test failed: {e}")
        return False
def test_config():
    """Verify the project settings module loads and exposes expected fields.

    Prints the key settings so a human can eyeball the configuration.
    Fixes mojibake check marks ("βœ“"/"βœ—" -> "✓"/"✗") in the output.

    Returns:
        bool: True if all settings attributes were readable, False otherwise.
    """
    print("\nTesting Configuration...")
    try:
        from config import settings

        print(f"✓ Default model: {settings.default_model}")
        print(f"✓ Embedding model: {settings.embedding_model}")
        print(f"✓ Max workers: {settings.max_workers}")
        print(f"✓ Cache TTL: {settings.cache_ttl}")
        return True
    except Exception as e:
        print(f"✗ Configuration test failed: {e}")
        return False
def run_all_tests():
    """Run every setup test and print a pass/fail summary.

    Fixes mojibake check marks ("βœ“"/"βœ—" -> "✓"/"✗") in the output.

    Returns:
        int: 0 if all tests passed, 1 otherwise (suitable as a process
        exit code).
    """
    print("=" * 50)
    print("Running Setup Tests")
    print("=" * 50)
    tests = [
        test_imports,
        test_embedding_model,
        test_llm_router,
        test_context_manager,
        test_cache,
        test_config,
    ]
    results = []
    for test in tests:
        try:
            results.append(test())
        except Exception as e:
            # Each helper should return False rather than raise, but guard
            # anyway so one crash cannot abort the whole run.
            print(f"✗ Test crashed: {e}")
            results.append(False)
    print("\n" + "=" * 50)
    print(f"Test Results: {sum(results)}/{len(results)} passed")
    print("=" * 50)
    if all(results):
        print("\n✓ All tests passed!")
        return 0
    print("\n✗ Some tests failed")
    return 1
if __name__ == "__main__":
    # Use sys.exit instead of the bare exit() builtin: `exit` is injected by
    # the `site` module for interactive use and is not guaranteed to exist
    # (e.g. when running under `python -S`).
    import sys

    sys.exit(run_all_tests())