From 06c3390a1f91589fb5f41dc49ba340e48f3e3540 Mon Sep 17 00:00:00 2001
From: Alexander Borg
Date: Sun, 7 Sep 2025 12:41:05 +0200
Subject: [PATCH] Fix jwt-token

---
 debug_device_status.py           | 140 +++++++++++++++++++++++
 docs/DEVICE_HEALTH_MONITORING.md | 183 +++++++++++++++++++++++++++++++
 2 files changed, 323 insertions(+)
 create mode 100644 debug_device_status.py
 create mode 100644 docs/DEVICE_HEALTH_MONITORING.md

diff --git a/debug_device_status.py b/debug_device_status.py
new file mode 100644
index 0000000..a09e383
--- /dev/null
+++ b/debug_device_status.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python3
+"""
+Device Status Debug Script
+Checks the actual device data to debug why the device shows as offline
+"""
+
+import requests
+import json
+import os
+import time
+from datetime import datetime, timezone
+
+# Disable SSL warnings for self-signed certificates
+import warnings
+warnings.filterwarnings('ignore', message='Unverified HTTPS request')
+
+# Configuration
+API_BASE_URL = os.getenv('API_BASE_URL', 'http://localhost:3002/api')
+BASE_PATH = os.getenv('VITE_BASE_PATH', '').rstrip('/')
+
+if BASE_PATH and not API_BASE_URL.endswith('/api'):
+    # Strip the longer, path-specific suffixes before the bare '/api';
+    # otherwise '/api' is removed first and the longer patterns never match.
+    domain = API_BASE_URL.replace('/drones/api', '').replace('/uggla/api', '').replace('/api', '')
+    API_BASE_URL = f"{domain}{BASE_PATH}/api"
+
+def debug_device_status():
+    """Debug device status calculation"""
+    print("šŸ” DEVICE STATUS DEBUG")
+    print("=" * 50)
+
+    try:
+        # Get all devices
+        response = requests.get(f"{API_BASE_URL}/devices", verify=False, timeout=10)
+
+        if response.status_code == 200:
+            data = response.json()
+            if data.get('success') and data.get('data'):
+                devices = data['data']
+
+                # Find the Stockholm device (device ID 1001)
+                stockholm_device = None
+                for device in devices:
+                    if device.get('id') == 1001 or 'Stockholm' in device.get('name', ''):
+                        stockholm_device = device
+                        break
+
+                if stockholm_device:
+                    print("šŸ“± Found Stockholm Device:")
+                    print(f"   ID: {stockholm_device['id']}")
+                    print(f"   Name: {stockholm_device.get('name', 'N/A')}")
+                    print(f"   Active: {stockholm_device.get('is_active', False)}")
+                    print(f"   Approved: {stockholm_device.get('is_approved', False)}")
+                    print(f"   Last Heartbeat: {stockholm_device.get('last_heartbeat', 'Never')}")
+                    print(f"   Heartbeat Interval: {stockholm_device.get('heartbeat_interval', 'Not set')} seconds")
+
+                    # Calculate status manually
+                    if stockholm_device.get('last_heartbeat'):
+                        # Compare timezone-aware timestamps; subtracting a UTC
+                        # timestamp from naive local time skews the result by
+                        # the local UTC offset.
+                        now = datetime.now(timezone.utc)
+                        last_heartbeat = datetime.fromisoformat(stockholm_device['last_heartbeat'].replace('Z', '+00:00'))
+                        if last_heartbeat.tzinfo is None:
+                            # Assume UTC if the API omits timezone info
+                            last_heartbeat = last_heartbeat.replace(tzinfo=timezone.utc)
+                        time_diff = (now - last_heartbeat).total_seconds()
+
+                        expected_interval = stockholm_device.get('heartbeat_interval', 300)
+                        threshold = expected_interval * 2
+
+                        print("\nšŸ”¢ Status Calculation:")
+                        print(f"   Current Time: {now}")
+                        print(f"   Last Heartbeat: {last_heartbeat}")
+                        print(f"   Time Since Last: {time_diff:.1f} seconds")
+                        print(f"   Expected Interval: {expected_interval} seconds")
+                        print(f"   Online Threshold: {threshold} seconds")
+                        print(f"   Should be Online: {time_diff < threshold}")
+
+                    # Check device stats
+                    if 'stats' in stockholm_device:
+                        stats = stockholm_device['stats']
+                        print("\nšŸ“Š Device Stats:")
+                        print(f"   Status: {stats.get('status', 'Unknown')}")
+                        print(f"   Time Since Last Heartbeat: {stats.get('time_since_last_heartbeat', 'Unknown')} seconds")
+                        print(f"   Detections 24h: {stats.get('detections_24h', 0)}")
+                else:
+                    print("āŒ Stockholm device not found")
+                    print("Available devices:")
+                    for device in devices:
+                        print(f"   - ID {device['id']}: {device.get('name', 'Unnamed')}")
+            else:
+                print("āŒ No devices found")
+        else:
+            print(f"āŒ HTTP Error: {response.status_code}")
+            print(f"   Response: {response.text}")
+
+    except Exception as e:
+        print(f"āŒ Error: {e}")
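+
+# A minimal standalone sketch of the same rule debug_device_status() applies
+# inline (illustrative only, not called above): a device counts as online if
+# its last heartbeat arrived within twice the expected interval. Note that the
+# health monitoring service described in docs/DEVICE_HEALTH_MONITORING.md uses
+# a separate, longer threshold (30 minutes) before alerting, so a device can
+# show as offline on the dashboard without triggering an alert.
+def is_device_online(last_heartbeat_iso, heartbeat_interval_s=300):
+    """Return True if the last heartbeat is within 2x the expected interval."""
+    last = datetime.fromisoformat(last_heartbeat_iso.replace('Z', '+00:00'))
+    if last.tzinfo is None:
+        last = last.replace(tzinfo=timezone.utc)
+    age_s = (datetime.now(timezone.utc) - last).total_seconds()
+    return age_s < heartbeat_interval_s * 2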
+
+def send_test_heartbeat():
+    """Send a test heartbeat to see if it gets processed"""
+    print("\nšŸ’“ Sending Test Heartbeat")
+    print("-" * 30)
+
+    try:
+        payload = {
+            'type': 'heartbeat',
+            'key': '1001'  # Stockholm device key
+        }
+
+        print(f"šŸ“” Sending to: {API_BASE_URL}/detectors")
+        print(f"šŸ“¦ Payload: {json.dumps(payload)}")
+
+        response = requests.post(
+            f"{API_BASE_URL}/detectors",
+            json=payload,
+            verify=False,
+            timeout=10
+        )
+
+        print(f"šŸ“Š Response: {response.status_code}")
+        if response.status_code in [200, 201]:
+            data = response.json()
+            print(f"āœ… Success: {data.get('message', 'OK')}")
+        else:
+            print(f"āŒ Error: {response.text}")
+
+    except Exception as e:
+        print(f"āŒ Error sending heartbeat: {e}")
+
+def main():
+    print(f"šŸ”— API URL: {API_BASE_URL}")
+    print(f"ā° Test Time: {datetime.now()}")
+    print()
+
+    debug_device_status()
+    send_test_heartbeat()
+
+    print("\nā³ Waiting 2 seconds...")
+    time.sleep(2)
+
+    print("\nšŸ”„ Checking status after heartbeat:")
+    debug_device_status()
+
+if __name__ == "__main__":
+    main()
diff --git a/docs/DEVICE_HEALTH_MONITORING.md b/docs/DEVICE_HEALTH_MONITORING.md
new file mode 100644
index 0000000..80e1065
--- /dev/null
+++ b/docs/DEVICE_HEALTH_MONITORING.md
@@ -0,0 +1,183 @@
+# Device Health Monitoring System
+
+The Device Health Monitoring System automatically monitors all active and approved devices for heartbeat activity and sends alerts when devices go offline for extended periods.
+
+## Features
+
+### Automatic Health Monitoring
+- **Continuous Monitoring**: Checks device health every 5 minutes
+- **Offline Detection**: Devices are considered offline after 30 minutes without a heartbeat
+- **Recovery Detection**: Automatically detects when offline devices come back online
+- **Alert Integration**: Uses the existing alert system for SMS/email/webhook notifications
+
+### Alert Capabilities
+- **SMS Alerts**: Send SMS notifications when devices go offline or recover
+- **Email Alerts**: Send email notifications (when configured)
+- **Webhook Integration**: Send webhook notifications for external systems
+- **Recovery Notifications**: Automatic "all clear" messages when devices recover
+
+### Configuration
+- **Customizable Thresholds**: Configure offline detection timeouts
+- **Alert Rules**: Use the existing alert rule system to configure recipients
+- **Channel Selection**: Choose SMS, email, webhook, or multiple channels
+- **Device-Specific Rules**: Create rules for specific devices or for all devices
+
+## Setup
+
+### 1. Alert Rule Configuration
+
+Create alert rules for device offline monitoring using the web interface or API:
+
+```json
+{
+  "name": "Device Offline Alert",
+  "description": "Alert when security devices go offline",
+  "conditions": {
+    "device_offline": true,
+    "device_ids": [1941875381, 1941875382]
+  },
+  "alert_channels": ["sms", "email"],
+  "sms_phone_number": "+46701234567",
+  "email": "admin@company.com",
+  "is_active": true,
+  "priority": "high"
+}
+```
+
+The `device_ids` condition is optional; omit it to apply the rule to all devices.
+
+### 2. Service Configuration
+
+The service starts automatically with the server and can be configured with environment variables:
+
+- **Check Interval**: How often to check device health (default: 5 minutes)
+- **Offline Threshold**: How long without a heartbeat before a device is considered offline (default: 30 minutes)
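+
+A minimal sketch of what that configuration could look like. The variable
+names below are illustrative assumptions, not confirmed keys; check the
+server's configuration handling for the actual names:
+
+```bash
+# Hypothetical variable names -- verify against the server code
+DEVICE_HEALTH_CHECK_INTERVAL_MINUTES=5      # how often the service runs
+DEVICE_HEALTH_OFFLINE_THRESHOLD_MINUTES=30  # silence before a device is "offline"
+```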
+
+### 3. SMS Configuration
+
+For SMS alerts, configure Twilio credentials:
+
+```bash
+TWILIO_ACCOUNT_SID=your_account_sid
+TWILIO_AUTH_TOKEN=your_auth_token
+TWILIO_PHONE_NUMBER=your_twilio_phone
+```
+
+## API Endpoints
+
+### Get Service Status
+```
+GET /api/device-health/status
+```
+
+Returns the current status of the device health monitoring service:
+
+```json
+{
+  "success": true,
+  "data": {
+    "isRunning": true,
+    "checkIntervalMinutes": 5,
+    "offlineThresholdMinutes": 30,
+    "offlineDevicesCount": 1,
+    "offlineDevices": [
+      {
+        "deviceId": 1941875383,
+        "deviceName": "Guard Tower 3",
+        "offlineSince": "2025-09-07T10:00:00Z",
+        "alertSent": true
+      }
+    ]
+  }
+}
+```
+
+### Trigger Manual Health Check
+```
+POST /api/device-health/check
+```
+
+Forces an immediate health check of all devices.
+
+### Start/Stop Service
+```
+POST /api/device-health/start
+POST /api/device-health/stop
+```
+
+Control the health monitoring service (it normally runs automatically).
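+
+As a usage sketch, the status endpoint can be polled with a few lines of
+Python in the same style as `debug_device_status.py`. The base URL is an
+assumption (the default `http://localhost:3002`); the response fields match
+the example above:
+
+```python
+import requests
+
+# Query the monitoring service and list any devices currently offline
+resp = requests.get("http://localhost:3002/api/device-health/status", timeout=10)
+data = resp.json().get("data", {})
+print(f"Service running: {data.get('isRunning')}")
+for dev in data.get("offlineDevices", []):
+    print(f"  {dev['deviceName']} offline since {dev['offlineSince']}")
+```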
+
+## Alert Messages
+
+### Offline Alert
+```
+🚨 DEVICE OFFLINE ALERT 🚨
+
+šŸ“ LOCATION: Stockholm Castle
+šŸ”§ DEVICE: Guard Tower 1
+ā° OFFLINE FOR: 45 minutes
+šŸ“… LAST SEEN: 2025-09-07 14:30:00
+
+āŒ Device has stopped sending heartbeats.
+šŸ”§ Check device power, network connection, or physical access.
+
+āš ļø Security monitoring may be compromised in this area.
+```
+
+### Recovery Alert
+```
+āœ… DEVICE RECOVERED āœ…
+
+šŸ“ LOCATION: Stockholm Castle
+šŸ”§ DEVICE: Guard Tower 1
+ā° RECOVERED AT: 2025-09-07 15:15:00
+
+āœ… Device is now sending heartbeats again.
+šŸ›”ļø Security monitoring restored for this area.
+```
+
+## Testing
+
+Use the provided test script to verify that the system is working:
+
+```bash
+python3 test_device_health.py
+```
+
+This will:
+- Check the device health service status
+- List all devices and their current health status
+- Show the configured alert rules for device offline monitoring
+- Trigger a manual health check
+
+## Integration with Existing Systems
+
+The device health monitoring integrates seamlessly with:
+
+1. **Existing Alert System**: Uses the same alert rules, channels, and logging
+2. **Device Management**: Works with the existing device approval and activation system
+3. **Heartbeat System**: Uses the existing heartbeat infrastructure
+4. **Dashboard**: Device status is already displayed in the device list
+
+## Troubleshooting
+
+### No Alerts Received
+1. Check that device offline alert rules are configured and active
+2. Verify that SMS/email credentials are properly configured
+3. Check the device health service status via the API
+4. Ensure devices are marked as active and approved
+
+### False Positives
+1. Increase the offline threshold if devices have irregular heartbeat patterns
+2. Check network connectivity between devices and the server
+3. Verify that heartbeat intervals are properly configured for each device
+
+### Service Not Running
+1. Check server logs for startup errors
+2. Verify database connectivity
+3. Restart the server to reinitialize the service, or restart the service via the API (see the recipe at the end of this page)
+
+## Monitoring and Logs
+
+- Service status is logged to the console with timestamps
+- Alert sending is logged with recipient and status information
+- Manual health checks can be triggered via the API for testing
+- The service handles graceful shutdown automatically on server restart
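+
+For the troubleshooting steps above, the service can also be restarted and
+re-checked over HTTP without restarting the whole server. A sketch assuming
+the documented endpoints and a server at `http://localhost:3002`:
+
+```bash
+# Restart the monitoring service in place
+curl -X POST http://localhost:3002/api/device-health/stop
+curl -X POST http://localhost:3002/api/device-health/start
+
+# Force an immediate health check, then confirm the service state
+curl -X POST http://localhost:3002/api/device-health/check
+curl http://localhost:3002/api/device-health/status
+```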