714 lines
24 KiB
Python
714 lines
24 KiB
Python
"""
|
|
StatsDashboard Plugin - Advanced Statistics and Analytics Dashboard
|
|
|
|
Provides comprehensive statistics, metrics, and analytics for EU-Utility
|
|
usage patterns, plugin performance, and system health.
|
|
"""
|
|
|
|
import os
|
|
import json
|
|
import time
|
|
import statistics
|
|
from pathlib import Path
|
|
from typing import Dict, Any, List, Optional, Callable, Tuple
|
|
from dataclasses import dataclass, field, asdict
|
|
from datetime import datetime, timedelta
|
|
from collections import defaultdict, deque
|
|
import threading
|
|
|
|
from core.base_plugin import BasePlugin
|
|
|
|
|
|
@dataclass
class MetricPoint:
    """A single metric data point."""
    # Unix epoch seconds (time.time()) at which the sample was taken.
    timestamp: float
    # The sampled metric value.
    value: float
    # Optional key/value tags categorizing this sample.
    labels: Dict[str, str] = field(default_factory=dict)
|
|
|
|
|
|
@dataclass
class TimeSeries:
    """Rolling time-series storage for one named metric.

    Keeps at most 10,000 MetricPoint entries in a bounded deque; once full,
    the oldest points are evicted automatically on append.
    """
    name: str
    description: str
    unit: str
    data: deque = field(default_factory=lambda: deque(maxlen=10000))

    def add(self, value: float, labels: Optional[Dict[str, str]] = None) -> None:
        """Append a data point stamped with the current wall-clock time."""
        stamped = MetricPoint(time.time(), value, labels or {})
        self.data.append(stamped)

    def get_range(self, start_time: float, end_time: float) -> List[MetricPoint]:
        """Return every point whose timestamp lies in [start_time, end_time]."""
        return [point for point in self.data
                if start_time <= point.timestamp <= end_time]

    def get_last(self, count: int = 1) -> List[MetricPoint]:
        """Return the most recent *count* points, oldest first."""
        snapshot = list(self.data)
        return snapshot[-count:]

    def get_stats(self, window_seconds: Optional[float] = None) -> Dict[str, float]:
        """Summarize stored values, optionally restricted to a trailing window.

        Returns ``{"count": 0}`` when no values fall inside the window.
        """
        if window_seconds:
            oldest_allowed = time.time() - window_seconds
            values = [p.value for p in self.data if p.timestamp >= oldest_allowed]
        else:
            values = [p.value for p in self.data]

        if not values:
            return {"count": 0}

        summary = {
            "count": len(values),
            "min": min(values),
            "max": max(values),
            "mean": statistics.mean(values),
            "median": statistics.median(values),
            # A single sample has no dispersion; report 0 rather than raising.
            "stdev": statistics.stdev(values) if len(values) > 1 else 0,
            "sum": sum(values),
        }
        return summary
|
|
|
|
|
|
@dataclass
class EventRecord:
    """A recorded event."""
    # Unix epoch seconds when the event occurred.
    timestamp: float
    # Free-form event type string (e.g. "stats_dashboard_started").
    event_type: str
    # Arbitrary structured payload attached by the emitter.
    details: Dict[str, Any] = field(default_factory=dict)
    # Emitting component (e.g. plugin name); "unknown" when not supplied.
    source: str = "unknown"
|
|
|
|
|
|
class StatsDashboardPlugin(BasePlugin):
    """
    Advanced statistics and analytics dashboard.

    Features:
    - Real-time metrics collection
    - Time-series data storage
    - Usage analytics
    - Plugin performance monitoring
    - System health tracking
    - Exportable reports
    - Custom dashboards
    """

    # Plugin identity metadata consumed by the plugin framework.
    name = "stats_dashboard"
    description = "Advanced statistics and analytics dashboard"
    version = "1.0.0"
    author = "EU-Utility"

    # Class-level defaults; copied per instance in __init__ and merged with
    # overrides via set_config().
    DEFAULT_CONFIG = {
        "data_dir": "data/stats",            # where metrics/events/state JSON files live
        "retention_days": 30,                # NOTE(review): not enforced anywhere in this file — confirm
        "collection_interval_seconds": 60,   # delay between background collection cycles
        "enable_system_metrics": True,       # start the background collector thread in on_start
        "enable_plugin_metrics": True,       # NOTE(review): not referenced in this file — confirm
        "enable_usage_metrics": True,        # NOTE(review): not referenced in this file — confirm
        "max_events": 10000,                 # ring-buffer capacity for recorded events
    }
|
|
|
|
def __init__(self):
|
|
super().__init__()
|
|
self._config = self.DEFAULT_CONFIG.copy()
|
|
self._metrics: Dict[str, TimeSeries] = {}
|
|
self._events: deque = deque(maxlen=self._config["max_events"])
|
|
self._counters: Dict[str, int] = defaultdict(int)
|
|
self._gauges: Dict[str, float] = {}
|
|
self._histograms: Dict[str, List[float]] = defaultdict(list)
|
|
self._running = False
|
|
self._collection_thread: Optional[threading.Thread] = None
|
|
self._listeners: List[Callable] = []
|
|
self._data_dir = Path(self._config["data_dir"])
|
|
self._data_dir.mkdir(parents=True, exist_ok=True)
|
|
|
|
# Initialize standard metrics
|
|
self._init_standard_metrics()
|
|
|
|
def _init_standard_metrics(self) -> None:
|
|
"""Initialize standard metric time series."""
|
|
self._metrics["cpu_percent"] = TimeSeries(
|
|
"cpu_percent", "CPU usage percentage", "%"
|
|
)
|
|
self._metrics["memory_percent"] = TimeSeries(
|
|
"memory_percent", "Memory usage percentage", "%"
|
|
)
|
|
self._metrics["disk_usage"] = TimeSeries(
|
|
"disk_usage", "Disk usage percentage", "%"
|
|
)
|
|
self._metrics["uptime_seconds"] = TimeSeries(
|
|
"uptime_seconds", "Application uptime", "s"
|
|
)
|
|
self._metrics["clipboard_copies"] = TimeSeries(
|
|
"clipboard_copies", "Clipboard copy operations", "count"
|
|
)
|
|
self._metrics["clipboard_pastes"] = TimeSeries(
|
|
"clipboard_pastes", "Clipboard paste operations", "count"
|
|
)
|
|
self._metrics["plugin_load_time"] = TimeSeries(
|
|
"plugin_load_time", "Plugin load time", "ms"
|
|
)
|
|
self._metrics["active_plugins"] = TimeSeries(
|
|
"active_plugins", "Number of active plugins", "count"
|
|
)
|
|
|
|
    def on_start(self) -> None:
        """Start the stats dashboard.

        Loads persisted data from disk, starts the background collection
        thread when system metrics are enabled, and records a startup event.
        """
        print(f"[{self.name}] Starting stats dashboard...")
        self._running = True
        self._start_time = time.time()

        # Load historical data persisted by a previous run.
        self._load_data()

        # Start collection thread (daemon, so it never blocks interpreter exit).
        if self._config["enable_system_metrics"]:
            self._collection_thread = threading.Thread(
                target=self._collection_loop, daemon=True
            )
            self._collection_thread.start()

        # Record startup event
        self.record_event("system", "stats_dashboard_started", {
            "version": self.version,
            "config": self._config,
        })
|
|
|
|
    def on_stop(self) -> None:
        """Stop the stats dashboard.

        Flags the collection loop to exit, records a shutdown event, and
        persists all in-memory data to disk.
        """
        print(f"[{self.name}] Stopping stats dashboard...")
        # The daemon collection thread polls this flag about once per second.
        self._running = False

        # Record shutdown event
        self.record_event("system", "stats_dashboard_stopped", {
            "uptime": time.time() - self._start_time,
        })

        # Save data
        self._save_data()
|
|
|
|
# Metric Collection
|
|
|
|
    def _collection_loop(self) -> None:
        """Background loop for collecting system metrics.

        Runs on the daemon thread started by ``on_start`` and exits once
        ``_running`` is cleared.
        """
        while self._running:
            try:
                self._collect_system_metrics()
                self._collect_clipboard_metrics()
            except Exception as e:
                # A collection failure must never kill the thread; log and
                # retry on the next cycle.
                print(f"[{self.name}] Collection error: {e}")

            # Sleep in 1-second slices so a shutdown request is noticed
            # within ~1s instead of after a full collection interval.
            for _ in range(self._config["collection_interval_seconds"]):
                if not self._running:
                    break
                time.sleep(1)
|
|
|
|
    def _collect_system_metrics(self) -> None:
        """Collect system-level metrics (CPU, memory, disk, uptime).

        psutil is an optional dependency: when it is not installed, only
        uptime is recorded.
        """
        try:
            import psutil

            # CPU — note: interval=1 blocks this (background) thread for ~1s.
            cpu_percent = psutil.cpu_percent(interval=1)
            self._metrics["cpu_percent"].add(cpu_percent)

            # Memory
            memory = psutil.virtual_memory()
            self._metrics["memory_percent"].add(memory.percent)

            # Disk usage of the current working directory's filesystem.
            disk = psutil.disk_usage('.')
            disk_percent = (disk.used / disk.total) * 100
            self._metrics["disk_usage"].add(disk_percent)

        except ImportError:
            pass  # psutil not available

        # Uptime
        uptime = time.time() - self._start_time
        self._metrics["uptime_seconds"].add(uptime)
|
|
|
|
    def _collect_clipboard_metrics(self) -> None:
        """Collect clipboard-related metrics.

        Placeholder: the clipboard service is expected to push values via
        the public metric API; nothing is sampled here yet.
        """
        # These would be populated by the clipboard service
        # For now, just record that we're tracking them
        pass
|
|
|
|
# Public Metric API
|
|
|
|
def record_counter(self, name: str, value: int = 1, labels: Optional[Dict[str, str]] = None) -> None:
|
|
"""
|
|
Record a counter increment.
|
|
|
|
Args:
|
|
name: Counter name
|
|
value: Amount to increment (default 1)
|
|
labels: Optional labels for categorization
|
|
"""
|
|
self._counters[name] += value
|
|
|
|
# Also add to time series if one exists
|
|
metric_name = f"counter_{name}"
|
|
if metric_name not in self._metrics:
|
|
self._metrics[metric_name] = TimeSeries(
|
|
metric_name, f"Counter: {name}", "count"
|
|
)
|
|
self._metrics[metric_name].add(self._counters[name], labels)
|
|
|
|
def record_gauge(self, name: str, value: float, labels: Optional[Dict[str, str]] = None) -> None:
|
|
"""
|
|
Record a gauge value.
|
|
|
|
Args:
|
|
name: Gauge name
|
|
value: Current value
|
|
labels: Optional labels for categorization
|
|
"""
|
|
self._gauges[name] = value
|
|
|
|
metric_name = f"gauge_{name}"
|
|
if metric_name not in self._metrics:
|
|
self._metrics[metric_name] = TimeSeries(
|
|
metric_name, f"Gauge: {name}", "value"
|
|
)
|
|
self._metrics[metric_name].add(value, labels)
|
|
|
|
def record_histogram(self, name: str, value: float, labels: Optional[Dict[str, str]] = None) -> None:
|
|
"""
|
|
Record a value to a histogram.
|
|
|
|
Args:
|
|
name: Histogram name
|
|
value: Value to record
|
|
labels: Optional labels for categorization
|
|
"""
|
|
self._histograms[name].append(value)
|
|
|
|
# Keep only last 10000 values
|
|
if len(self._histograms[name]) > 10000:
|
|
self._histograms[name] = self._histograms[name][-10000:]
|
|
|
|
metric_name = f"histogram_{name}"
|
|
if metric_name not in self._metrics:
|
|
self._metrics[metric_name] = TimeSeries(
|
|
metric_name, f"Histogram: {name}", "value"
|
|
)
|
|
self._metrics[metric_name].add(value, labels)
|
|
|
|
def record_timing(self, name: str, duration_ms: float, labels: Optional[Dict[str, str]] = None) -> None:
|
|
"""
|
|
Record a timing measurement.
|
|
|
|
Args:
|
|
name: Timing name
|
|
duration_ms: Duration in milliseconds
|
|
labels: Optional labels for categorization
|
|
"""
|
|
self.record_histogram(f"timing_{name}", duration_ms, labels)
|
|
|
|
def record_event(self, source: str, event_type: str, details: Optional[Dict[str, Any]] = None) -> None:
|
|
"""
|
|
Record an event.
|
|
|
|
Args:
|
|
source: Event source (e.g., plugin name)
|
|
event_type: Type of event
|
|
details: Additional event details
|
|
"""
|
|
event = EventRecord(
|
|
timestamp=time.time(),
|
|
event_type=event_type,
|
|
details=details or {},
|
|
source=source,
|
|
)
|
|
self._events.append(event)
|
|
|
|
# Notify listeners
|
|
for listener in self._listeners:
|
|
try:
|
|
listener(event)
|
|
except Exception as e:
|
|
print(f"[{self.name}] Listener error: {e}")
|
|
|
|
def time_operation(self, name: str):
|
|
"""Context manager for timing operations."""
|
|
class Timer:
|
|
def __init__(timer_self, stats_plugin, operation_name):
|
|
timer_self.stats = stats_plugin
|
|
timer_self.name = operation_name
|
|
timer_self.start = None
|
|
|
|
def __enter__(timer_self):
|
|
timer_self.start = time.time()
|
|
return timer_self
|
|
|
|
def __exit__(timer_self, *args):
|
|
duration_ms = (time.time() - timer_self.start) * 1000
|
|
timer_self.stats.record_timing(timer_self.name, duration_ms)
|
|
|
|
return Timer(self, name)
|
|
|
|
# Query Methods
|
|
|
|
def get_metric(self, name: str) -> Optional[TimeSeries]:
|
|
"""Get a metric time series by name."""
|
|
return self._metrics.get(name)
|
|
|
|
def get_all_metrics(self) -> Dict[str, TimeSeries]:
|
|
"""Get all metrics."""
|
|
return self._metrics.copy()
|
|
|
|
def get_metric_names(self) -> List[str]:
|
|
"""Get list of all metric names."""
|
|
return list(self._metrics.keys())
|
|
|
|
def get_counter(self, name: str) -> int:
|
|
"""Get current counter value."""
|
|
return self._counters.get(name, 0)
|
|
|
|
def get_gauge(self, name: str) -> Optional[float]:
|
|
"""Get current gauge value."""
|
|
return self._gauges.get(name)
|
|
|
|
def get_histogram_stats(self, name: str) -> Dict[str, Any]:
|
|
"""Get statistics for a histogram."""
|
|
values = self._histograms.get(name, [])
|
|
if not values:
|
|
return {"count": 0}
|
|
|
|
sorted_values = sorted(values)
|
|
return {
|
|
"count": len(values),
|
|
"min": min(values),
|
|
"max": max(values),
|
|
"mean": statistics.mean(values),
|
|
"median": statistics.median(values),
|
|
"p50": sorted_values[len(values) // 2],
|
|
"p90": sorted_values[int(len(values) * 0.9)],
|
|
"p95": sorted_values[int(len(values) * 0.95)],
|
|
"p99": sorted_values[int(len(values) * 0.99)],
|
|
"stdev": statistics.stdev(values) if len(values) > 1 else 0,
|
|
}
|
|
|
|
def get_events(self,
|
|
event_type: Optional[str] = None,
|
|
source: Optional[str] = None,
|
|
start_time: Optional[float] = None,
|
|
end_time: Optional[float] = None,
|
|
limit: int = 100) -> List[EventRecord]:
|
|
"""
|
|
Query events with filters.
|
|
|
|
Args:
|
|
event_type: Filter by event type
|
|
source: Filter by source
|
|
start_time: Filter by start timestamp
|
|
end_time: Filter by end timestamp
|
|
limit: Maximum number of events to return
|
|
|
|
Returns:
|
|
List of matching events
|
|
"""
|
|
results = []
|
|
|
|
for event in reversed(self._events): # Newest first
|
|
if len(results) >= limit:
|
|
break
|
|
|
|
if event_type and event.event_type != event_type:
|
|
continue
|
|
if source and event.source != source:
|
|
continue
|
|
if start_time and event.timestamp < start_time:
|
|
continue
|
|
if end_time and event.timestamp > end_time:
|
|
continue
|
|
|
|
results.append(event)
|
|
|
|
return results
|
|
|
|
def get_system_health(self) -> Dict[str, Any]:
|
|
"""Get overall system health status."""
|
|
health = {
|
|
"status": "healthy",
|
|
"uptime_seconds": time.time() - self._start_time,
|
|
"metrics": {},
|
|
"issues": [],
|
|
}
|
|
|
|
# Check CPU
|
|
cpu_stats = self._metrics.get("cpu_percent", TimeSeries("", "", "")).get_stats(300)
|
|
if cpu_stats.get("mean", 0) > 80:
|
|
health["status"] = "warning"
|
|
health["issues"].append("High CPU usage")
|
|
health["metrics"]["cpu"] = cpu_stats
|
|
|
|
# Check Memory
|
|
mem_stats = self._metrics.get("memory_percent", TimeSeries("", "", "")).get_stats(300)
|
|
if mem_stats.get("mean", 0) > 85:
|
|
health["status"] = "warning"
|
|
health["issues"].append("High memory usage")
|
|
health["metrics"]["memory"] = mem_stats
|
|
|
|
# Check Disk
|
|
disk_stats = self._metrics.get("disk_usage", TimeSeries("", "", "")).get_stats()
|
|
if disk_stats.get("max", 0) > 90:
|
|
health["status"] = "critical"
|
|
health["issues"].append("Low disk space")
|
|
health["metrics"]["disk"] = disk_stats
|
|
|
|
return health
|
|
|
|
def get_dashboard_summary(self) -> Dict[str, Any]:
|
|
"""Get a summary for dashboard display."""
|
|
return {
|
|
"uptime": self._format_duration(time.time() - self._start_time),
|
|
"total_events": len(self._events),
|
|
"total_metrics": len(self._metrics),
|
|
"counters": dict(self._counters),
|
|
"gauges": self._gauges.copy(),
|
|
"health": self.get_system_health(),
|
|
"recent_events": [
|
|
{
|
|
"time": datetime.fromtimestamp(e.timestamp).isoformat(),
|
|
"type": e.event_type,
|
|
"source": e.source,
|
|
}
|
|
for e in list(self._events)[-10:]
|
|
],
|
|
}
|
|
|
|
# Reports
|
|
|
|
    def generate_report(self,
                        start_time: Optional[float] = None,
                        end_time: Optional[float] = None) -> Dict[str, Any]:
        """
        Generate a comprehensive statistics report.

        Args:
            start_time: Report start time (default: 24 hours ago)
            end_time: Report end time (default: now)

        Returns:
            Report data dictionary with period, system health, per-metric
            summaries, top event types, and counter totals.
        """
        if end_time is None:
            end_time = time.time()
        if start_time is None:
            start_time = end_time - (24 * 3600)  # default: trailing 24 hours

        report = {
            "generated_at": datetime.now().isoformat(),
            "period": {
                "start": datetime.fromtimestamp(start_time).isoformat(),
                "end": datetime.fromtimestamp(end_time).isoformat(),
            },
            "system_health": self.get_system_health(),
            "metrics_summary": {},
            "top_events": [],
            "counters": dict(self._counters),
        }

        # Metric summaries
        # NOTE(review): get_stats() measures its window back from *now*,
        # not from end_time, so for a historical end_time the summarized
        # window drifts — confirm whether that is intended.
        for name, series in self._metrics.items():
            stats = series.get_stats(end_time - start_time)
            if stats["count"] > 0:
                report["metrics_summary"][name] = stats

        # Event summary: occurrences of each event type within the period.
        event_counts = defaultdict(int)
        for event in self._events:
            if start_time <= event.timestamp <= end_time:
                event_counts[event.event_type] += 1

        # Top 20 event types by frequency, most frequent first.
        report["top_events"] = sorted(
            event_counts.items(),
            key=lambda x: x[1],
            reverse=True
        )[:20]

        return report
|
|
|
|
def export_report(self,
|
|
filepath: Optional[str] = None,
|
|
format: str = "json") -> str:
|
|
"""
|
|
Export a report to file.
|
|
|
|
Args:
|
|
filepath: Output file path (default: auto-generated)
|
|
format: Export format (json, csv)
|
|
|
|
Returns:
|
|
Path to exported file
|
|
"""
|
|
report = self.generate_report()
|
|
|
|
if filepath is None:
|
|
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
|
filepath = str(self._data_dir / f"report_{timestamp}.{format}")
|
|
|
|
if format == "json":
|
|
with open(filepath, 'w') as f:
|
|
json.dump(report, f, indent=2, default=str)
|
|
|
|
elif format == "csv":
|
|
# Export metrics as CSV
|
|
import csv
|
|
with open(filepath, 'w', newline='') as f:
|
|
writer = csv.writer(f)
|
|
writer.writerow(["metric", "count", "min", "max", "mean", "stdev"])
|
|
for name, stats in report["metrics_summary"].items():
|
|
writer.writerow([
|
|
name,
|
|
stats.get("count", 0),
|
|
stats.get("min", 0),
|
|
stats.get("max", 0),
|
|
stats.get("mean", 0),
|
|
stats.get("stdev", 0),
|
|
])
|
|
|
|
print(f"[{self.name}] Report exported: {filepath}")
|
|
return filepath
|
|
|
|
# Persistence
|
|
|
|
    def _save_data(self) -> None:
        """Persist metrics, events, and counter/gauge/histogram state as
        JSON files under the data directory.

        Best-effort: any failure is logged and swallowed so shutdown never
        crashes on a persistence error.
        """
        try:
            # Save metrics: one entry per series with compact point keys
            # (t=timestamp, v=value, l=labels) to keep files small.
            metrics_data = {
                name: {
                    "name": series.name,
                    "description": series.description,
                    "unit": series.unit,
                    "data": [
                        {"t": p.timestamp, "v": p.value, "l": p.labels}
                        for p in series.data
                    ],
                }
                for name, series in self._metrics.items()
            }

            metrics_file = self._data_dir / "metrics.json"
            with open(metrics_file, 'w') as f:
                json.dump(metrics_data, f)

            # Save events
            events_data = [
                {
                    "timestamp": e.timestamp,
                    "event_type": e.event_type,
                    "details": e.details,
                    "source": e.source,
                }
                for e in self._events
            ]

            events_file = self._data_dir / "events.json"
            with open(events_file, 'w') as f:
                json.dump(events_data, f)

            # Save counters and gauges; each histogram is truncated to its
            # most recent 1000 samples to bound the file size.
            state = {
                "counters": dict(self._counters),
                "gauges": self._gauges,
                "histograms": {k: v[-1000:] for k, v in self._histograms.items()},
            }

            state_file = self._data_dir / "state.json"
            with open(state_file, 'w') as f:
                json.dump(state, f)

            print(f"[{self.name}] Data saved")

        except Exception as e:
            print(f"[{self.name}] Failed to save data: {e}")
|
|
|
|
    def _load_data(self) -> None:
        """Restore metrics, events, and counter/gauge/histogram state from
        the JSON files written by ``_save_data``.

        Best-effort: missing files are skipped and any parse failure is
        logged and swallowed so startup never crashes on bad data.
        """
        try:
            # Load metrics
            metrics_file = self._data_dir / "metrics.json"
            if metrics_file.exists():
                with open(metrics_file) as f:
                    data = json.load(f)

                for name, series_data in data.items():
                    series = TimeSeries(
                        series_data["name"],
                        series_data["description"],
                        series_data["unit"],
                    )
                    # Points were saved with compact keys:
                    # t=timestamp, v=value, l=labels.
                    for point in series_data.get("data", []):
                        series.data.append(MetricPoint(
                            timestamp=point["t"],
                            value=point["v"],
                            labels=point.get("l", {}),
                        ))
                    self._metrics[name] = series

            # Load events
            events_file = self._data_dir / "events.json"
            if events_file.exists():
                with open(events_file) as f:
                    data = json.load(f)

                for event_data in data:
                    self._events.append(EventRecord(
                        timestamp=event_data["timestamp"],
                        event_type=event_data["event_type"],
                        details=event_data.get("details", {}),
                        source=event_data.get("source", "unknown"),
                    ))

            # Load state
            state_file = self._data_dir / "state.json"
            if state_file.exists():
                with open(state_file) as f:
                    state = json.load(f)

                self._counters = defaultdict(int, state.get("counters", {}))
                self._gauges = state.get("gauges", {})
                self._histograms = defaultdict(list, state.get("histograms", {}))

            print(f"[{self.name}] Data loaded")

        except Exception as e:
            print(f"[{self.name}] Failed to load data: {e}")
|
|
|
|
# Utilities
|
|
|
|
def _format_duration(self, seconds: float) -> str:
|
|
"""Format duration in human-readable form."""
|
|
hours = int(seconds // 3600)
|
|
minutes = int((seconds % 3600) // 60)
|
|
secs = int(seconds % 60)
|
|
|
|
if hours > 0:
|
|
return f"{hours}h {minutes}m {secs}s"
|
|
elif minutes > 0:
|
|
return f"{minutes}m {secs}s"
|
|
else:
|
|
return f"{secs}s"
|
|
|
|
# Event Listeners
|
|
|
|
def add_listener(self, callback: Callable[[EventRecord], None]) -> None:
|
|
"""Add an event listener."""
|
|
self._listeners.append(callback)
|
|
|
|
def remove_listener(self, callback: Callable) -> None:
|
|
"""Remove an event listener."""
|
|
if callback in self._listeners:
|
|
self._listeners.remove(callback)
|
|
|
|
# Configuration
|
|
|
|
def set_config(self, config: Dict[str, Any]) -> None:
|
|
"""Update configuration."""
|
|
self._config.update(config)
|
|
self._events = deque(maxlen=self._config["max_events"])
|