#!/usr/bin/env python3
"""
EU-Utility Test Runner
======================

Comprehensive test runner with coverage reporting and result formatting.

Usage:
    python run_tests.py [options]

Options:
    --unit            Run unit tests only
    --integration     Run integration tests only
    --ui              Run UI tests only
    --performance     Run performance tests only
    --all             Run all tests (default)
    --coverage        Generate coverage report
    --html            Generate HTML coverage report
    --xml             Generate XML coverage report for CI
    --verbose, -v     Verbose output
    --fail-fast       Stop on first failure
    --markers         Show available test markers

Examples:
    python run_tests.py --unit --coverage
    python run_tests.py --all --html
    python run_tests.py --performance
"""
import sys
|
|
import subprocess
|
|
import argparse
|
|
from pathlib import Path
|
|
|
|
# Add project root to path
|
|
project_root = Path(__file__).parent
|
|
sys.path.insert(0, str(project_root))
|
|
|
|
|
|
def run_command(cmd, description=""):
|
|
"""Run a command and return result."""
|
|
print(f"\n{'='*60}")
|
|
print(f"Running: {description or ' '.join(cmd)}")
|
|
print('='*60)
|
|
|
|
result = subprocess.run(cmd, capture_output=False)
|
|
return result.returncode == 0
|
|
|
|
|
|
def run_unit_tests(args):
|
|
"""Run unit tests."""
|
|
cmd = ["python", "-m", "pytest", "tests/unit/", "-v" if args.verbose else "-q"]
|
|
|
|
if args.fail_fast:
|
|
cmd.append("-x")
|
|
|
|
if args.coverage:
|
|
cmd.extend(["--cov=core", "--cov=plugins"])
|
|
|
|
if args.html and args.coverage:
|
|
cmd.extend(["--cov-report=html", "--cov-report=term"])
|
|
elif args.xml and args.coverage:
|
|
cmd.extend(["--cov-report=xml", "--cov-report=term"])
|
|
elif args.coverage:
|
|
cmd.append("--cov-report=term")
|
|
|
|
return run_command(cmd, "Unit Tests")
|
|
|
|
|
|
def run_integration_tests(args):
|
|
"""Run integration tests."""
|
|
cmd = ["python", "-m", "pytest", "tests/integration/", "-v" if args.verbose else "-q", "-m", "integration"]
|
|
|
|
if args.fail_fast:
|
|
cmd.append("-x")
|
|
|
|
if args.coverage:
|
|
cmd.extend(["--cov=core", "--cov=plugins", "--cov-append"])
|
|
|
|
return run_command(cmd, "Integration Tests")
|
|
|
|
|
|
def run_ui_tests(args):
|
|
"""Run UI tests."""
|
|
cmd = ["python", "-m", "pytest", "tests/ui/", "-v" if args.verbose else "-q", "-m", "ui"]
|
|
|
|
if args.fail_fast:
|
|
cmd.append("-x")
|
|
|
|
# Check if pytest-qt is available
|
|
try:
|
|
import pytestqt
|
|
cmd.extend(["--qt-api=pyqt6"])
|
|
except ImportError:
|
|
print("Warning: pytest-qt not installed, UI tests may fail")
|
|
print("Install with: pip install pytest-qt")
|
|
|
|
return run_command(cmd, "UI Tests")
|
|
|
|
|
|
def run_performance_tests(args):
|
|
"""Run performance tests."""
|
|
cmd = ["python", "-m", "pytest", "tests/performance/", "-v" if args.verbose else "-q", "--benchmark-only"]
|
|
|
|
# Check if pytest-benchmark is available
|
|
try:
|
|
import pytest_benchmark
|
|
except ImportError:
|
|
print("Warning: pytest-benchmark not installed")
|
|
print("Install with: pip install pytest-benchmark")
|
|
return False
|
|
|
|
return run_command(cmd, "Performance Tests")
|
|
|
|
|
|
def show_markers():
|
|
"""Show available test markers."""
|
|
cmd = ["python", "-m", "pytest", "--markers"]
|
|
return run_command(cmd, "Available Markers")
|
|
|
|
|
|
def main():
|
|
parser = argparse.ArgumentParser(
|
|
description="EU-Utility Test Runner",
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
epilog="""
|
|
Examples:
|
|
python run_tests.py # Run all tests
|
|
python run_tests.py --unit # Run unit tests only
|
|
python run_tests.py --unit --coverage # Run unit tests with coverage
|
|
python run_tests.py --all --html # Run all tests, generate HTML report
|
|
python run_tests.py --performance # Run performance benchmarks
|
|
"""
|
|
)
|
|
|
|
# Test categories
|
|
parser.add_argument("--unit", action="store_true", help="Run unit tests only")
|
|
parser.add_argument("--integration", action="store_true", help="Run integration tests only")
|
|
parser.add_argument("--ui", action="store_true", help="Run UI tests only")
|
|
parser.add_argument("--performance", action="store_true", help="Run performance tests only")
|
|
parser.add_argument("--all", action="store_true", help="Run all tests (default)")
|
|
|
|
# Coverage options
|
|
parser.add_argument("--coverage", action="store_true", help="Generate coverage report")
|
|
parser.add_argument("--html", action="store_true", help="Generate HTML coverage report")
|
|
parser.add_argument("--xml", action="store_true", help="Generate XML coverage report")
|
|
|
|
# Output options
|
|
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
|
|
parser.add_argument("--fail-fast", "-x", action="store_true", help="Stop on first failure")
|
|
parser.add_argument("--markers", action="store_true", help="Show available test markers")
|
|
|
|
args = parser.parse_args()
|
|
|
|
# Show markers if requested
|
|
if args.markers:
|
|
return show_markers()
|
|
|
|
# Determine which tests to run
|
|
run_all = args.all or not any([args.unit, args.integration, args.ui, args.performance])
|
|
|
|
results = []
|
|
|
|
# Run tests
|
|
if run_all or args.unit:
|
|
results.append(run_unit_tests(args))
|
|
|
|
if run_all or args.integration:
|
|
results.append(run_integration_tests(args))
|
|
|
|
if run_all or args.ui:
|
|
results.append(run_ui_tests(args))
|
|
|
|
if run_all or args.performance:
|
|
results.append(run_performance_tests(args))
|
|
|
|
# Summary
|
|
print("\n" + "="*60)
|
|
print("TEST SUMMARY")
|
|
print("="*60)
|
|
|
|
passed = sum(results)
|
|
failed = len(results) - passed
|
|
|
|
print(f"Test suites run: {len(results)}")
|
|
print(f"Passed: {passed}")
|
|
print(f"Failed: {failed}")
|
|
|
|
if all(results):
|
|
print("\n✓ All tests passed!")
|
|
return 0
|
|
else:
|
|
print("\n✗ Some tests failed")
|
|
return 1
|
|
|
|
|
|
if __name__ == "__main__":
|
|
sys.exit(main())
|