"""
Lemontropia Suite - Hardware Detection Module

Detect GPU and ML framework availability with error handling.
"""
|
|
|
|
import logging
|
|
from typing import Dict, Any, Optional, List
|
|
from dataclasses import dataclass, field
|
|
from enum import Enum
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class GPUBackend(Enum):
    """Supported GPU backends.

    Members are ordered by selection priority (see
    HardwareDetector._determine_gpu_backend: CUDA > MPS > DIRECTML > CPU).
    """

    CUDA = "cuda"            # NVIDIA CUDA
    MPS = "mps"              # Apple Metal Performance Shaders
    DIRECTML = "directml"    # Windows DirectML
    CPU = "cpu"              # Fallback CPU
|
|
|
|
|
|
@dataclass
class HardwareInfo:
    """Aggregated snapshot of detected GPU, ML-framework, and system state.

    Populated field-by-field by the HardwareDetector probes; every field
    has a safe "nothing detected" default so a partially-run probe still
    yields a usable object.
    """

    # --- GPU ---
    gpu_backend: GPUBackend = GPUBackend.CPU
    cuda_available: bool = False
    cuda_device_count: int = 0
    cuda_devices: List[Dict] = field(default_factory=list)
    mps_available: bool = False
    directml_available: bool = False

    # --- OpenCV GPU ---
    opencv_cuda_available: bool = False
    opencv_cuda_devices: int = 0

    # --- ML frameworks ---
    pytorch_available: bool = False
    pytorch_version: Optional[str] = None
    pytorch_error: Optional[str] = None
    pytorch_dll_error: bool = False

    paddle_available: bool = False
    paddle_version: Optional[str] = None

    # --- System ---
    platform: str = "unknown"
    python_executable: str = "unknown"
    is_windows_store_python: bool = False

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the snapshot as a plain nested dict (JSON-friendly)."""
        gpu_section = {
            'backend': self.gpu_backend.value,
            'cuda_available': self.cuda_available,
            'cuda_devices': self.cuda_devices,
            'mps_available': self.mps_available,
            'directml_available': self.directml_available,
            'opencv_cuda': self.opencv_cuda_available,
        }
        framework_section = {
            'pytorch': {
                'available': self.pytorch_available,
                'version': self.pytorch_version,
                'error': self.pytorch_error,
                'dll_error': self.pytorch_dll_error,
            },
            'paddle': {
                'available': self.paddle_available,
                'version': self.paddle_version,
            }
        }
        system_section = {
            'platform': self.platform,
            'python': self.python_executable,
            'windows_store': self.is_windows_store_python,
        }
        return {
            'gpu': gpu_section,
            'ml_frameworks': framework_section,
            'system': system_section,
        }
|
|
|
|
|
|
class HardwareDetector:
    """Detect hardware capabilities with error handling.

    Every probe is best-effort: import failures, DLL load errors, and
    runtime errors are caught, logged, and recorded on the returned
    HardwareInfo instead of being raised, so detection never crashes
    the caller.
    """

    @staticmethod
    def detect_all() -> HardwareInfo:
        """Run every probe and return a fully populated HardwareInfo."""
        info = HardwareInfo()

        # Detect system info
        info = HardwareDetector._detect_system(info)

        # Detect OpenCV GPU
        info = HardwareDetector._detect_opencv_cuda(info)

        # Detect PyTorch (with special error handling)
        info = HardwareDetector._detect_pytorch_safe(info)

        # Detect PaddlePaddle
        info = HardwareDetector._detect_paddle(info)

        # Determine best GPU backend
        info = HardwareDetector._determine_gpu_backend(info)

        return info

    @staticmethod
    def _detect_system(info: HardwareInfo) -> HardwareInfo:
        """Record OS platform, interpreter path, and Windows Store detection."""
        import sys
        import platform

        info.platform = platform.system()
        info.python_executable = sys.executable

        # Windows Store Python installs live under a WindowsApps/Microsoft
        # path; that distribution is known to break native DLL loading.
        exe_lower = sys.executable.lower()
        info.is_windows_store_python = (
            'windowsapps' in exe_lower or
            'microsoft' in exe_lower
        )

        if info.is_windows_store_python:
            logger.warning(
                "Windows Store Python detected - may have DLL compatibility issues"
            )

        return info

    @staticmethod
    def _detect_opencv_cuda(info: HardwareInfo) -> HardwareInfo:
        """Detect OpenCV CUDA support (device count and, if possible, name)."""
        try:
            import cv2

            cuda_count = cv2.cuda.getCudaEnabledDeviceCount()
            info.opencv_cuda_devices = cuda_count
            info.opencv_cuda_available = cuda_count > 0

            if info.opencv_cuda_available:
                try:
                    # FIX: cv2.cuda.getDevice() returns an int device id, so
                    # the previous getDevice().name() always raised; query the
                    # name through cv2.cuda.DeviceInfo instead.
                    device_name = cv2.cuda.DeviceInfo(cv2.cuda.getDevice()).name()
                    logger.info(f"OpenCV CUDA device: {device_name}")
                except Exception:
                    # Name lookup is cosmetic; fall back to the count.
                    logger.info(f"OpenCV CUDA available ({cuda_count} devices)")

        except Exception as e:
            # cv2 not installed, or built without the cuda module.
            logger.debug(f"OpenCV CUDA detection failed: {e}")
            info.opencv_cuda_available = False

        return info

    @staticmethod
    def _detect_pytorch_safe(info: HardwareInfo) -> HardwareInfo:
        """
        Detect PyTorch with safe error handling for DLL issues.

        This is critical for Windows Store Python compatibility: importing
        torch there can raise OSError from the DLL loader rather than
        ImportError, so both are handled separately.
        """
        try:
            import torch

            info.pytorch_available = True
            info.pytorch_version = torch.__version__

            # Check CUDA
            info.cuda_available = torch.cuda.is_available()
            if info.cuda_available:
                info.cuda_device_count = torch.cuda.device_count()
                for i in range(info.cuda_device_count):
                    info.cuda_devices.append({
                        'id': i,
                        'name': torch.cuda.get_device_name(i),
                        'memory': torch.cuda.get_device_properties(i).total_memory
                    })
                logger.info(f"PyTorch CUDA: {info.cuda_devices}")

            # Check MPS (Apple Silicon); the attribute only exists on
            # builds that ship the MPS backend, hence the hasattr guard.
            if hasattr(torch.backends, 'mps'):
                info.mps_available = torch.backends.mps.is_available()
                if info.mps_available:
                    logger.info("PyTorch MPS (Metal) available")

            logger.info(f"PyTorch {info.pytorch_version} available")

        except ImportError:
            info.pytorch_available = False
            info.pytorch_error = "PyTorch not installed"
            logger.debug("PyTorch not installed")

        except OSError as e:
            # DLL error - common with Windows Store Python
            error_str = str(e).lower()
            info.pytorch_available = False
            info.pytorch_dll_error = True
            info.pytorch_error = str(e)

            if any(x in error_str for x in ['dll', 'c10', 'specified module']):
                logger.error(
                    f"PyTorch DLL error (Windows Store Python?): {e}"
                )
                logger.info(
                    "This is a known issue. Use alternative OCR backends."
                )
            else:
                logger.error(f"PyTorch OS error: {e}")

        except Exception as e:
            # Catch-all boundary: record and continue with other probes.
            info.pytorch_available = False
            info.pytorch_error = str(e)
            logger.error(f"PyTorch detection failed: {e}")

        return info

    @staticmethod
    def _detect_paddle(info: HardwareInfo) -> HardwareInfo:
        """Detect PaddlePaddle availability and version."""
        try:
            import paddle
            info.paddle_available = True
            info.paddle_version = paddle.__version__
            logger.info(f"PaddlePaddle {info.paddle_version} available")

        except ImportError:
            info.paddle_available = False
            logger.debug("PaddlePaddle not installed")

        except Exception as e:
            info.paddle_available = False
            logger.debug(f"PaddlePaddle detection failed: {e}")

        return info

    @staticmethod
    def _determine_gpu_backend(info: HardwareInfo) -> HardwareInfo:
        """Determine the best available GPU backend.

        Priority: CUDA > MPS > DirectML > CPU.
        """
        if info.cuda_available:
            info.gpu_backend = GPUBackend.CUDA
        elif info.mps_available:
            info.gpu_backend = GPUBackend.MPS
        elif info.directml_available:
            info.gpu_backend = GPUBackend.DIRECTML
        else:
            info.gpu_backend = GPUBackend.CPU

        return info

    @staticmethod
    def get_gpu_summary() -> str:
        """Run a full detection pass and return a human-readable summary."""
        info = HardwareDetector.detect_all()

        lines = ["=" * 50]
        lines.append("HARDWARE DETECTION SUMMARY")
        lines.append("=" * 50)

        # GPU Section
        lines.append(f"\nGPU Backend: {info.gpu_backend.value.upper()}")

        if info.cuda_available:
            lines.append(f"CUDA Devices: {info.cuda_device_count}")
            for dev in info.cuda_devices:
                gb = dev['memory'] / (1024**3)
                lines.append(f"  [{dev['id']}] {dev['name']} ({gb:.1f} GB)")

        if info.mps_available:
            lines.append("Apple MPS (Metal): Available")

        if info.opencv_cuda_available:
            lines.append(f"OpenCV CUDA: {info.opencv_cuda_devices} device(s)")

        # ML Frameworks
        lines.append("\nML Frameworks:")

        if info.pytorch_available:
            lines.append(f"  PyTorch: {info.pytorch_version}")
            lines.append(f"    CUDA: {'Yes' if info.cuda_available else 'No'}")
        else:
            # No placeholders -> plain strings (were pointless f-strings).
            lines.append("  PyTorch: Not available")
            if info.pytorch_dll_error:
                lines.append("    ⚠️ DLL Error (Windows Store Python?)")

        if info.paddle_available:
            lines.append(f"  PaddlePaddle: {info.paddle_version}")
        else:
            lines.append("  PaddlePaddle: Not installed")

        # System
        lines.append(f"\nSystem: {info.platform}")
        if info.is_windows_store_python:
            lines.append("⚠️ Windows Store Python (may have DLL issues)")

        lines.append("=" * 50)

        return "\n".join(lines)

    @staticmethod
    def can_use_paddleocr() -> bool:
        """Check if PaddleOCR can be used (no DLL errors)."""
        # NOTE(review): this gates on PyTorch availability rather than
        # paddle_available - presumably because the same DLL breakage
        # affects both stacks (recommend_ocr_backend requires both for
        # 'paddleocr' too). Confirm against the OCR integration.
        info = HardwareDetector.detect_all()
        return info.pytorch_available and not info.pytorch_dll_error

    @staticmethod
    def recommend_ocr_backend() -> str:
        """
        Recommend the best OCR backend based on hardware.

        Returns:
            Name of recommended backend: 'paddleocr', 'easyocr',
            'tesseract', or 'opencv_east'.
        """
        info = HardwareDetector.detect_all()

        # If PyTorch has DLL error, avoid PaddleOCR and EasyOCR (which uses PyTorch)
        if info.pytorch_dll_error:
            logger.info("PyTorch DLL error detected - avoiding PyTorch-based OCR")

            # Check OpenCV CUDA first
            if info.opencv_cuda_available:
                return 'opencv_east'

            # Check Tesseract
            try:
                import pytesseract
                return 'tesseract'
            except ImportError:
                pass

            # Fall back to OpenCV EAST (CPU)
            return 'opencv_east'

        # No DLL issues - can use any backend
        # Priority: PaddleOCR > EasyOCR > Tesseract > OpenCV EAST

        if info.pytorch_available and info.paddle_available:
            return 'paddleocr'

        if info.pytorch_available:
            try:
                import easyocr
                return 'easyocr'
            except ImportError:
                pass

        try:
            import pytesseract
            return 'tesseract'
        except ImportError:
            pass

        return 'opencv_east'
|
|
|
|
|
|
# Convenience functions
|
|
def get_hardware_info() -> HardwareInfo:
    """Module-level convenience wrapper: run a full hardware probe."""
    snapshot = HardwareDetector.detect_all()
    return snapshot
|
|
|
|
|
|
def print_hardware_summary():
    """Write the human-readable hardware summary to stdout."""
    summary = HardwareDetector.get_gpu_summary()
    print(summary)
|
|
|
|
|
|
def recommend_ocr_backend() -> str:
    """Module-level convenience wrapper: name of the recommended OCR backend."""
    backend = HardwareDetector.recommend_ocr_backend()
    return backend
|