# EU-Utility/plugins/test_suite/performance_benchmark/plugin.py
# (504 lines, 17 KiB, Python)
"""
Performance Benchmark Plugin
Comprehensive performance testing for all APIs:
- API call latency measurements
- Throughput testing
- Memory usage tracking
- Widget rendering performance
- HTTP request performance
- Event bus throughput
Generates benchmark reports with performance metrics.
"""
import time
import gc
import sys
from datetime import datetime
from typing import Dict, List, Any, Callable
from dataclasses import dataclass, field
from statistics import mean, median, stdev
from core.base_plugin import BasePlugin
from core.api.plugin_api import get_api
from core.api.widget_api import get_widget_api, WidgetType
from core.api.external_api import get_external_api
@dataclass
class BenchmarkResult:
    """Single benchmark result: aggregate timing metrics for one operation."""
    category: str               # API group the operation belongs to (e.g. "DataStore")
    operation: str              # human-readable operation name (e.g. "write")
    iterations: int             # number of timed iterations (warmup excluded)
    total_time_ms: float        # sum of all per-iteration times, in milliseconds
    avg_time_ms: float          # mean per-iteration time, in milliseconds
    min_time_ms: float          # fastest single iteration, in milliseconds
    max_time_ms: float          # slowest single iteration, in milliseconds
    throughput_ops_sec: float   # iterations divided by total elapsed seconds
    memory_kb: float = 0        # optional memory delta; not populated by this plugin
    notes: str = ""             # free-form annotation (e.g. "Received: N events")
class PerformanceBenchmarkPlugin(BasePlugin):
    """
    Performance benchmark suite for EU-Utility APIs.

    Measures latency, throughput, and resource usage to identify
    bottlenecks and track performance over time. Results are shown in a
    widget and can be exported to a timestamped JSON file.
    """

    def __init__(self):
        super().__init__()
        self.api = None            # PluginAPI handle, set in initialize()
        self.widget_api = None     # WidgetAPI handle, set in initialize()
        self.external_api = None   # ExternalAPI handle, set in initialize()
        self.results: List[BenchmarkResult] = []
        self.widget = None         # main results widget
        self.warmup_iterations = 10  # untimed calls run before each timed loop

    def initialize(self):
        """Acquire API handles, create the results widget, schedule the run."""
        self.api = get_api()
        self.widget_api = get_widget_api()
        self.external_api = get_external_api()
        self._create_results_widget()
        # Run benchmarks after a short delay to let the UI settle.
        # NOTE(review): Timer callbacks fire on a background thread, while the
        # benchmarks create/update Qt widgets; Qt normally requires GUI work on
        # the main thread -- confirm the widget API marshals these calls.
        import threading
        threading.Timer(0.5, self._run_all_benchmarks).start()

    def _create_results_widget(self):
        """Create and show the widget that displays benchmark results."""
        self.widget = self.widget_api.create_widget(
            name="performance_benchmark",
            title="⚡ Performance Benchmark",
            size=(900, 700),
            position=(300, 100),
            widget_type=WidgetType.CHART
        )
        self._update_widget_display()
        self.widget.show()

    def _update_widget_display(self):
        """Rebuild widget content: header, summary boxes, table, controls, report."""
        try:
            from PyQt6.QtWidgets import (
                QWidget, QVBoxLayout, QHBoxLayout, QLabel,
                QPushButton, QTableWidget, QTableWidgetItem,
                QHeaderView, QProgressBar, QTextBrowser, QGroupBox
            )
            from PyQt6.QtCore import Qt
            from PyQt6.QtGui import QColor
            container = QWidget()
            main_layout = QVBoxLayout(container)
            # Header
            header = QLabel("⚡ API Performance Benchmark")
            header.setStyleSheet("font-size: 22px; font-weight: bold; color: #ff8c42;")
            main_layout.addWidget(header)
            # Summary section -- only rendered once results exist.
            if self.results:
                summary_layout = QHBoxLayout()
                total_ops = sum(r.iterations for r in self.results)
                avg_latency = mean(r.avg_time_ms for r in self.results)
                total_throughput = sum(r.throughput_ops_sec for r in self.results)
                summaries = [
                    ("Total Operations", f"{total_ops:,}"),
                    ("Avg Latency", f"{avg_latency:.3f}ms"),
                    ("Combined Throughput", f"{total_throughput:,.0f}/s")
                ]
                for title, value in summaries:
                    group = QGroupBox(title)
                    group_layout = QVBoxLayout(group)
                    lbl = QLabel(value)
                    lbl.setStyleSheet("font-size: 20px; font-weight: bold; color: #4ecca3;")
                    lbl.setAlignment(Qt.AlignmentFlag.AlignCenter)
                    group_layout.addWidget(lbl)
                    summary_layout.addWidget(group)
                main_layout.addLayout(summary_layout)
            # Results table
            self.results_table = QTableWidget()
            self.results_table.setColumnCount(7)
            self.results_table.setHorizontalHeaderLabels([
                "Category", "Operation", "Iterations", "Avg (ms)",
                "Min/Max (ms)", "Throughput (ops/s)", "Status"
            ])
            self.results_table.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeMode.Stretch)
            self._populate_results_table()
            main_layout.addWidget(self.results_table)
            # Controls
            btn_layout = QHBoxLayout()
            btn_run = QPushButton("▶ Run Benchmarks")
            btn_run.clicked.connect(self._run_all_benchmarks)
            btn_layout.addWidget(btn_run)
            btn_export = QPushButton("📊 Export Results")
            btn_export.clicked.connect(self._export_results)
            btn_layout.addWidget(btn_export)
            main_layout.addLayout(btn_layout)
            # Detailed HTML report -- only rendered once results exist.
            if self.results:
                report_group = QGroupBox("Detailed Report")
                report_layout = QVBoxLayout(report_group)
                self.report_browser = QTextBrowser()
                self.report_browser.setHtml(self._generate_detailed_report())
                self.report_browser.setMaximumHeight(200)
                report_layout.addWidget(self.report_browser)
                main_layout.addWidget(report_group)
            self.widget.set_content(container)
        except ImportError as e:
            print(f"Widget error: {e}")

    def _populate_results_table(self):
        """Fill the results table with one row per BenchmarkResult."""
        if not hasattr(self, 'results_table'):
            return
        # Bug fix: QTableWidgetItem was previously only imported inside
        # _update_widget_display's local import, so referencing it here
        # raised NameError at runtime.
        from PyQt6.QtWidgets import QTableWidgetItem
        self.results_table.setRowCount(len(self.results))
        for i, r in enumerate(self.results):
            self.results_table.setItem(i, 0, QTableWidgetItem(r.category))
            self.results_table.setItem(i, 1, QTableWidgetItem(r.operation))
            self.results_table.setItem(i, 2, QTableWidgetItem(f"{r.iterations:,}"))
            self.results_table.setItem(i, 3, QTableWidgetItem(f"{r.avg_time_ms:.3f}"))
            self.results_table.setItem(i, 4, QTableWidgetItem(f"{r.min_time_ms:.3f} / {r.max_time_ms:.3f}"))
            self.results_table.setItem(i, 5, QTableWidgetItem(f"{r.throughput_ops_sec:,.0f}"))
            # Grade by average latency: >100ms slow, >10ms OK, otherwise good.
            status = "✅ Good"
            if r.avg_time_ms > 100:
                status = "⚠️ Slow"
            elif r.avg_time_ms > 10:
                status = "⚡ OK"
            status_item = QTableWidgetItem(status)
            self.results_table.setItem(i, 6, status_item)

    def _generate_detailed_report(self) -> str:
        """Generate the detailed HTML report shown in the report browser."""
        html = """
        <style>
        body { font-family: 'Segoe UI', monospace; background: #1a1a2e; color: #eee; padding: 15px; }
        h3 { color: #ff8c42; margin-top: 15px; }
        .metric { margin: 5px 0; }
        .good { color: #4ecca3; }
        .warning { color: #ffd93d; }
        .slow { color: #ff6b6b; }
        table { width: 100%; border-collapse: collapse; margin: 10px 0; font-size: 12px; }
        th { background: #2d3748; padding: 8px; text-align: left; }
        td { padding: 6px; border-bottom: 1px solid #444; }
        </style>
        <h3>Performance Summary by Category</h3>
        <table>
        <tr><th>Category</th><th>Tests</th><th>Avg Latency</th><th>Total Throughput</th></tr>
        """
        # Group results by category to compute per-category aggregates.
        categories = {}
        for r in self.results:
            if r.category not in categories:
                categories[r.category] = []
            categories[r.category].append(r)
        for cat, results in categories.items():
            avg_lat = mean(r.avg_time_ms for r in results)
            total_tp = sum(r.throughput_ops_sec for r in results)
            html += f"""
            <tr>
            <td><strong>{cat}</strong></td>
            <td>{len(results)}</td>
            <td class="{'slow' if avg_lat > 100 else 'good'}">{avg_lat:.3f}ms</td>
            <td>{total_tp:,.0f}/s</td>
            </tr>
            """
        html += "</table>"
        # Performance grades
        html += "<h3>Performance Grades</h3>"
        all_latencies = [r.avg_time_ms for r in self.results]
        if all_latencies:
            overall_avg = mean(all_latencies)
            html += f"""
            <div class="metric">Overall Average Latency:
            <span class="{'slow' if overall_avg > 50 else 'warning' if overall_avg > 10 else 'good'}">{overall_avg:.3f}ms</span>
            </div>
            """
            # Slowest operations
            slowest = sorted(self.results, key=lambda r: r.avg_time_ms, reverse=True)[:3]
            html += "<p><strong>Top 3 Slowest Operations:</strong></p><ul>"
            for r in slowest:
                html += f"<li>{r.category}.{r.operation}: {r.avg_time_ms:.3f}ms</li>"
            html += "</ul>"
        return html

    def _benchmark(self, operation: Callable, iterations: int,
                   category: str, name: str) -> BenchmarkResult:
        """Time `operation` over `iterations` runs and return aggregate metrics.

        Runs `self.warmup_iterations` untimed warmup calls first, then times
        each call with time.perf_counter(). An exception raised by the
        operation is printed and the iteration is still recorded (its partial
        time counts), so the returned metrics are best-effort.
        """
        # Warmup -- exceptions here are deliberately ignored (best effort).
        for _ in range(self.warmup_iterations):
            try:
                operation()
            except Exception:
                pass
        # Actual benchmark
        times = []
        gc.collect()  # clean up beforehand to reduce GC noise in the timed loop
        for _ in range(iterations):
            start = time.perf_counter()
            try:
                operation()
            except Exception as e:
                print(f"Benchmark error: {e}")
            end = time.perf_counter()
            times.append((end - start) * 1000)  # convert seconds to ms
        total_time = sum(times)
        return BenchmarkResult(
            category=category,
            operation=name,
            iterations=iterations,
            total_time_ms=total_time,
            avg_time_ms=mean(times),
            min_time_ms=min(times),
            max_time_ms=max(times),
            # Guard against a zero total when clock resolution is coarse.
            throughput_ops_sec=iterations / (total_time / 1000) if total_time > 0 else 0
        )

    def _run_all_benchmarks(self):
        """Execute every benchmark suite, refresh the display and notify."""
        self.results.clear()
        # PluginAPI benchmarks
        self._benchmark_datastore()
        self._benchmark_eventbus()
        self._benchmark_http()
        # WidgetAPI benchmarks
        self._benchmark_widget_creation()
        self._benchmark_widget_operations()
        # ExternalAPI benchmarks
        self._benchmark_external_api()
        self._update_widget_display()
        # Show completion notification
        self.api.show_notification(
            "Benchmark Complete",
            f"Completed {len(self.results)} benchmark tests",
            duration=3000
        )

    def _benchmark_datastore(self):
        """Benchmark DataStore write and read operations."""
        # Write benchmark: unique key per call so every write is an insert.
        def write_op():
            self.api.set_data(f"bench_key_{time.time()}", {"test": "data"})
        self.results.append(self._benchmark(write_op, 100, "DataStore", "write"))
        # Read benchmark against a fixed pre-seeded key.
        self.api.set_data("bench_read_key", "test_value")
        def read_op():
            self.api.get_data("bench_read_key")
        self.results.append(self._benchmark(read_op, 1000, "DataStore", "read"))
        # Cleanup
        self.api.delete_data("bench_read_key")

    def _benchmark_eventbus(self):
        """Benchmark Event Bus publish and subscribe/unsubscribe cycles."""
        received = []
        def handler(data):
            received.append(data)
        sub_id = self.api.subscribe("bench.event", handler)
        # Publish benchmark -- handler delivery count recorded in notes.
        def publish_op():
            self.api.publish("bench.event", {"test": "data"})
        result = self._benchmark(publish_op, 500, "EventBus", "publish")
        result.notes = f"Received: {len(received)} events"
        self.results.append(result)
        # Subscribe/unsubscribe round-trip benchmark.
        def sub_unsub_op():
            sid = self.api.subscribe("bench.temp", lambda x: None)
            self.api.unsubscribe(sid)
        self.results.append(self._benchmark(sub_unsub_op, 100, "EventBus", "subscribe/unsubscribe"))
        self.api.unsubscribe(sub_id)

    def _benchmark_http(self):
        """Benchmark uncached HTTP GET latency (makes real network requests)."""
        def http_get_op():
            self.api.http_get("https://httpbin.org/get", cache=False)
        # Use fewer iterations for network operations.
        self.results.append(self._benchmark(http_get_op, 5, "HTTP", "GET (network)"))

    def _benchmark_widget_creation(self):
        """Benchmark widget creation, then close everything that was created."""
        counter = [0]          # list cell so the closure can mutate it
        created_widgets = []   # names to close afterwards
        def create_op():
            name = f"bench_widget_{counter[0]}"
            widget = self.widget_api.create_widget(
                name=name,
                title=f"Bench {counter[0]}",
                size=(200, 150)
            )
            created_widgets.append(name)
            counter[0] += 1
        result = self._benchmark(create_op, 20, "WidgetAPI", "create_widget")
        self.results.append(result)
        # Cleanup created widgets; close failures are non-fatal.
        for name in created_widgets:
            try:
                self.widget_api.close_widget(name)
            except Exception:
                pass

    def _benchmark_widget_operations(self):
        """Benchmark move and opacity changes on a temporary widget."""
        widget = self.widget_api.create_widget(
            name="bench_op_widget",
            title="Benchmark",
            size=(300, 200)
        )
        widget.show()
        # Move benchmark -- position cycles so each call actually moves.
        pos = [0]
        def move_op():
            widget.move(100 + pos[0], 100 + pos[0])
            pos[0] = (pos[0] + 10) % 200
        self.results.append(self._benchmark(move_op, 200, "WidgetAPI", "move"))
        # Opacity benchmark -- value varies with pos to avoid no-op calls.
        def opacity_op():
            widget.set_opacity(0.5 + (pos[0] % 100) / 200)
        self.results.append(self._benchmark(opacity_op, 200, "WidgetAPI", "set_opacity"))
        # Cleanup
        widget.close()

    def _benchmark_external_api(self):
        """Benchmark ExternalAPI key creation and endpoint registration."""
        # API key creation benchmark; created keys are revoked afterwards.
        keys_created = []
        def create_key_op():
            key = self.external_api.create_api_key(f"bench_key_{time.time()}")
            keys_created.append(key)
        result = self._benchmark(create_key_op, 50, "ExternalAPI", "create_api_key")
        self.results.append(result)
        # Cleanup keys
        for key in keys_created:
            self.external_api.revoke_api_key(key)
        # Endpoint registration benchmark. NOTE(review): registered bench
        # endpoints are never unregistered here -- confirm that is acceptable.
        counter = [0]
        def register_endpoint_op():
            self.external_api.register_endpoint(
                f"bench/endpoint/{counter[0]}",
                lambda x: x
            )
            counter[0] += 1
        self.results.append(self._benchmark(register_endpoint_op, 30, "ExternalAPI", "register_endpoint"))

    def _export_results(self):
        """Export benchmark results to a timestamped JSON file in the CWD."""
        try:
            import json
            export_data = {
                "timestamp": datetime.now().isoformat(),
                "system_info": {
                    "python_version": sys.version,
                    "platform": sys.platform
                },
                "results": [
                    {
                        "category": r.category,
                        "operation": r.operation,
                        "iterations": r.iterations,
                        "avg_time_ms": r.avg_time_ms,
                        "min_time_ms": r.min_time_ms,
                        "max_time_ms": r.max_time_ms,
                        "throughput_ops_sec": r.throughput_ops_sec,
                        "notes": r.notes
                    }
                    for r in self.results
                ],
                "summary": {
                    "total_tests": len(self.results),
                    "overall_avg_latency_ms": mean(r.avg_time_ms for r in self.results) if self.results else 0,
                    "total_throughput_ops_sec": sum(r.throughput_ops_sec for r in self.results)
                }
            }
            filename = f"performance_benchmark_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
            with open(filename, 'w') as f:
                json.dump(export_data, f, indent=2)
            # Bug fix: the message previously read "Results saved to (unknown)"
            # -- an f-string with no placeholder; report the actual filename.
            self.api.show_notification(
                "Export Complete",
                f"Results saved to {filename}",
                duration=3000
            )
        except Exception as e:
            self.api.show_notification(
                "Export Failed",
                str(e),
                duration=3000
            )

    def shutdown(self):
        """Clean up resources: close the main results widget if it exists."""
        if self.widget:
            self.widget.close()
# Plugin entry point: the plugin loader discovers the class via this name.
plugin_class = PerformanceBenchmarkPlugin