Compare commits
4 Commits
3d74bf13f5 ... 524120c9f2

| Author | SHA1 | Date |
|---|---|---|
| | 524120c9f2 | |
| | 4d8b4d6114 | |
| | 9159520e8f | |
| | 9ac382e23e | |
5 .gitignore vendored
@@ -1,2 +1,3 @@
-GEMINI.md
-PROGRESS.md
+__pycache__/
+monitoring_data.json
+__pycache__/config.cpython-313.pyc
@@ -4,4 +4,5 @@
 - Prioritize security-related events such as failed login attempts, unauthorized access, or unusual network connections.
 - Focus on events indicating loss of connectivity or unreachable hosts.
 - Highlight any unexpected network additions or unusual traffic patterns.
-- The DNS server 8.8.8.8 is Google's public DNS server and is a legitimate destination. Do not flag requests to 8.8.8.8 as anomalous.
+- The DNS server 8.8.8.8 is Google's public DNS server and is a legitimate destination. Do not flag requests to 8.8.8.8 as anomalous.
+- Action has been taken against IP addresses 45.88.8.215, 45.88.8.186, 120.48.49.12, and 23.137.255.140. These are completely banned and cannot access the system at all.
34 PROGRESS.md Normal file
@@ -0,0 +1,34 @@
+# Project Progress
+
+## Phase 1: Initial Setup
+
+1. [x] Create `monitor_agent.py`
+2. [x] Create `config.py`
+3. [x] Create `requirements.txt`
+4. [x] Create `README.md`
+5. [x] Create `.gitignore`
+6. [x] Create `SPEC.md`
+7. [x] Create `PROMPT.md`
+8. [x] Create `CONSTRAINTS.md`
+
+## Phase 2: Data Storage
+
+9. [x] Create `data_storage.py`
+10. [x] Implement data storage functions in `data_storage.py`
+11. [x] Update `monitor_agent.py` to use data storage
+12. [x] Update `SPEC.md` to reflect data storage functionality
+
+## Phase 3: Expanded Monitoring
+
+13. [x] Implement CPU temperature monitoring
+14. [x] Implement GPU temperature monitoring
+15. [x] Implement system login attempt monitoring
+16. [x] Update `monitor_agent.py` to include new metrics
+17. [x] Update `SPEC.md` to reflect new metrics
+18. [x] Extend `calculate_baselines` to include system temps
+
+## Phase 4: Troubleshooting
+
+19. [x] Investigated and resolved issue with `jc` library
+20. [x] Removed `jc` library as a dependency
+21. [x] Implemented manual parsing of `sensors` command output
11 SPEC.md
@@ -33,6 +33,12 @@ The project will be composed of the following files:
 - The agent must be able to collect and parse network metrics.
 - The parsing of this data should result in a structured format (JSON or Python dictionary).
 
+### 3.3. Monitored Metrics
+
+- **CPU Temperature**: The agent will monitor the CPU temperature.
+- **GPU Temperature**: The agent will monitor the GPU temperature.
+- **System Login Attempts**: The agent will monitor system login attempts.
+
 ### 3.3. LLM Analysis
 
 - The agent must use a local LLM (via Ollama) to analyze the collected data.
@@ -55,9 +61,10 @@ The project will be composed of the following files:
 - The loop will execute the data collection, analysis, and alerting steps periodically.
 - The frequency of the monitoring loop will be configurable.
 
-## 4. Future Features
+## 4. Data Storage and Baselining
 
-- **4.1. Data Storage and Averaging**: Store historical system data to calculate baseline averages for more accurate anomaly detection.
+- **4.1. Data Storage**: The agent will store historical monitoring data in a JSON file (`monitoring_data.json`).
+- **4.2. Baselining**: The agent will calculate baseline averages for key metrics (e.g., RTT, packet loss) from the stored historical data. This baseline will be used by the LLM to improve anomaly detection accuracy.
 
 ## 5. Technical Requirements
 
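In practice, the baselining described in 4.2 amounts to a per-metric arithmetic mean over recent stored records; as a one-line illustration in Python (record layout taken from the data_storage.py diff further down):

avg_rtt = sum(r['network_metrics']['round_trip_ms_avg'] for r in records) / len(records)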
Binary file not shown.
10 config.py
@@ -1,15 +1,15 @@
 # Configuration for the LLM-Powered Monitoring Agent
 
 # Discord Webhook URL
-DISCORD_WEBHOOK_URL = ""
+DISCORD_WEBHOOK_URL = "https://discord.com/api/webhooks/1024892743987773471/3Oh1KOw9tevBd-XtUkj8Rz2K4SePCFsxKmRrHhQw5spDeZKNzoyYoq6zC2cnTKo8VjJn"
 
 # Home Assistant Configuration
-HOME_ASSISTANT_URL = "http://<HOME_ASSISTANT_IP>:8123"
-HOME_ASSISTANT_TOKEN = ""
-GOOGLE_HOME_SPEAKER_ID = "media_player.your_speaker_entity_id"
+HOME_ASSISTANT_URL = "http://192.168.2.112:8123"
+HOME_ASSISTANT_TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJjOGRmZjI4NDY2MTQ0ZDFkODhiODVjNmQyZTA2MzFiNSIsImlhdCI6MTc1NTU0NDY4OSwiZXhwIjoyMDcwOTA0Njg5fQ.5ZeOkixbdme5SF1QVknZ0bjnPYj1Qrps5HDn-Loi-cQ"
+GOOGLE_HOME_SPEAKER_ID = "media_player.spencer_room_speaker"
 
 # Daily Recap Time (in 24-hour format, e.g., "20:00")
 DAILY_RECAP_TIME = "20:00"
 
 # Test Mode (True to run once and exit, False to run continuously)
-TEST_MODE = True
+TEST_MODE = False
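Note that this hunk commits a live Discord webhook URL and a long-lived Home Assistant token to version control. A minimal sketch of reading them from the environment instead (an assumption, not what the repo does; stdlib os only, variable names hypothetical):

import os

# Fall back to empty strings so the import never fails; the agent can check for blanks at startup.
DISCORD_WEBHOOK_URL = os.environ.get("DISCORD_WEBHOOK_URL", "")
HOME_ASSISTANT_TOKEN = os.environ.get("HOME_ASSISTANT_TOKEN", "")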
data_storage.py
@@ -1,59 +1,38 @@
 # Data Storage for the LLM-Powered Monitoring Agent
 
 import json
+import os
+from datetime import datetime, timedelta
 
-DATA_FILE = "historical_data.json"
+DATA_FILE = 'monitoring_data.json'
 
-def store_data(data):
-    """Stores data in a JSON file."""
-    try:
-        with open(DATA_FILE, 'r+') as f:
-            try:
-                historical_data = json.load(f)
-            except json.JSONDecodeError:
-                historical_data = []
-            historical_data.append(data)
-            f.seek(0)
-            json.dump(historical_data, f, indent=2)
-    except FileNotFoundError:
-        with open(DATA_FILE, 'w') as f:
-            json.dump([data], f, indent=2)
-
-def get_historical_data():
-    """Retrieves historical data from the JSON file."""
-    try:
+def load_data():
+    if os.path.exists(DATA_FILE):
         with open(DATA_FILE, 'r') as f:
             return json.load(f)
-    except (FileNotFoundError, json.JSONDecodeError):
-        return []
+    return []
+
+def store_data(new_data):
+    data = load_data()
+    data.append(new_data)
+    with open(DATA_FILE, 'w') as f:
+        json.dump(data, f, indent=4)
 
 def calculate_baselines():
     """Calculates baseline averages for network metrics."""
-    historical_data = get_historical_data()
-    if not historical_data:
-        return None
+    data = load_data()
+    if not data:
+        return {}
 
-    # Calculate average network metrics
-    total_packets_transmitted = 0
-    total_packets_received = 0
-    total_packet_loss_percent = 0
-    total_round_trip_ms_avg = 0
-    count = 0
+    # For simplicity, we'll average the last 24 hours of data
+    # More complex logic can be added here
+    recent_data = [d for d in data if datetime.fromisoformat(d['system_logs']['timestamp'].replace('Z', '')) > datetime.now() - timedelta(hours=24)]
 
-    for data in historical_data:
-        if "network_metrics" in data and data["network_metrics"]:
-            total_packets_transmitted += data["network_metrics"].get("packets_transmitted", 0) or 0
-            total_packets_received += data["network_metrics"].get("packets_received", 0) or 0
-            total_packet_loss_percent += data["network_metrics"].get("packet_loss_percent", 0) or 0
-            total_round_trip_ms_avg += data["network_metrics"].get("round_trip_ms_avg", 0) or 0
-            count += 1
+    if not recent_data:
+        return {}
 
-    if count == 0:
-        return None
-
-    return {
-        "avg_packets_transmitted": total_packets_transmitted / count,
-        "avg_packets_received": total_packets_received / count,
-        "avg_packet_loss_percent": total_packet_loss_percent / count,
-        "avg_round_trip_ms_avg": total_round_trip_ms_avg / count,
+    baseline_metrics = {
+        'avg_rtt': sum(d['network_metrics']['round_trip_ms_avg'] for d in recent_data) / len(recent_data),
+        'packet_loss': sum(d['network_metrics']['packet_loss_percent'] for d in recent_data) / len(recent_data),
+        'avg_cpu_temp': sum(d['cpu_temperature']['cpu_temperature'] for d in recent_data) / len(recent_data),
+        'avg_gpu_temp': sum(d['gpu_temperature']['gpu_temperature'] for d in recent_data) / len(recent_data),
     }
 
+    return baseline_metrics
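Taken together, the rewrite replaces the r+/seek append with a read-modify-write through load_data(). A usage sketch with illustrative values (the timestamp format is inferred from the .replace('Z', '') call above):

import data_storage

data_storage.store_data({
    "system_logs": {"timestamp": "2025-08-18T20:00:00Z"},
    "network_metrics": {"round_trip_ms_avg": 12.3, "packet_loss_percent": 0.0},
    "cpu_temperature": {"cpu_temperature": 45.0},
    "gpu_temperature": {"gpu_temperature": 51.0},
})
baselines = data_storage.calculate_baselines()  # {} until a stored record is less than 24 h old

Note that the averaging comprehensions index d['network_metrics'], d['cpu_temperature'], and d['gpu_temperature'] directly, so every stored record must carry all three keys or calculate_baselines raises KeyError (and an "N/A" temperature string would make sum() raise TypeError).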
monitor_agent.py
@@ -42,6 +42,60 @@ def get_network_metrics():
         print(f"Error parsing network metrics: {e}")
         return None
 
+import re
+
+def get_cpu_temperature():
+    """Gets the CPU temperature using the sensors command."""
+    try:
+        sensors_output = subprocess.check_output(["sensors"], text=True)
+        # Use regex to find the CPU temperature
+        match = re.search(r"Package id 0:\s+\+([\d\.]+)", sensors_output)
+        if match:
+            return {"cpu_temperature": float(match.group(1))}
+        else:
+            return {"cpu_temperature": "N/A"}
+    except (subprocess.CalledProcessError, FileNotFoundError):
+        print("Error: 'sensors' command not found. Please install lm-sensors.")
+        return {"cpu_temperature": "N/A"}
+
+def get_gpu_temperature():
+    """Gets the GPU temperature using the sensors command."""
+    try:
+        sensors_output = subprocess.check_output(["sensors"], text=True)
+        # Use regex to find the GPU temperature for amdgpu
+        match = re.search(r"edge:\s+\+([\d\.]+)", sensors_output)
+        if match:
+            return {"gpu_temperature": float(match.group(1))}
+        else:
+            # if amdgpu not found, try radeon
+            match = re.search(r"temp1:\s+\+([\d\.]+)", sensors_output)
+            if match:
+                return {"gpu_temperature": float(match.group(1))}
+            else:
+                return {"gpu_temperature": "N/A"}
+    except (subprocess.CalledProcessError, FileNotFoundError):
+        print("Error: 'sensors' command not found. Please install lm-sensors.")
+        return {"gpu_temperature": "N/A"}
+
+def get_login_attempts():
+    """Gets system login attempts from /var/log/auth.log."""
+    try:
+        with open("/var/log/auth.log", "r") as f:
+            log_lines = f.readlines()
+
+        failed_logins = []
+        for line in log_lines:
+            if "Failed password" in line:
+                failed_logins.append(line.strip())
+
+        return {"failed_login_attempts": failed_logins}
+    except FileNotFoundError:
+        print("Error: /var/log/auth.log not found.")
+        return {"failed_login_attempts": []}
+    except Exception as e:
+        print(f"Error reading login attempts: {e}")
+        return {"failed_login_attempts": []}
+
 # --- LLM Interaction Function ---
 
 def analyze_data_with_llm(data, baselines):
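The two temperature regexes assume lm-sensors label formats: "Package id 0:" from the coretemp driver and "edge:" from amdgpu. Exact labels vary by hardware, so the sample line below is illustrative, not captured output:

import re

sample = "Package id 0:  +45.0°C  (high = +80.0°C, crit = +100.0°C)"  # assumed coretemp line
match = re.search(r"Package id 0:\s+\+([\d\.]+)", sample)
print(float(match.group(1)))  # 45.0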
@@ -116,11 +170,17 @@ if __name__ == "__main__":
         print("Running in test mode...")
         system_logs = get_system_logs()
         network_metrics = get_network_metrics()
+        cpu_temp = get_cpu_temperature()
+        gpu_temp = get_gpu_temperature()
+        login_attempts = get_login_attempts()
 
         if system_logs and network_metrics:
             combined_data = {
                 "system_logs": system_logs,
-                "network_metrics": network_metrics
+                "network_metrics": network_metrics,
+                "cpu_temperature": cpu_temp,
+                "gpu_temperature": gpu_temp,
+                "login_attempts": login_attempts
             }
             data_storage.store_data(combined_data)
 
@@ -138,13 +198,21 @@ if __name__ == "__main__":
         print("Running monitoring cycle...")
         system_logs = get_system_logs()
         network_metrics = get_network_metrics()
+        cpu_temp = get_cpu_temperature()
+        gpu_temp = get_gpu_temperature()
+        login_attempts = get_login_attempts()
 
         if system_logs and network_metrics:
             combined_data = {
                 "system_logs": system_logs,
-                "network_metrics": network_metrics
+                "network_metrics": network_metrics,
+                "cpu_temperature": cpu_temp,
+                "gpu_temperature": gpu_temp,
+                "login_attempts": login_attempts
             }
 
             data_storage.store_data(combined_data)
 
             llm_response = analyze_data_with_llm(combined_data, data_storage.calculate_baselines())
 
             if llm_response and llm_response != "OK":
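With this change each cycle stores five top-level keys and the LLM call gains a second argument. A sketch of what that baselines argument contains, using the key names from the data_storage.py diff above and illustrative values:

baselines = {
    'avg_rtt': 12.3,        # mean round_trip_ms_avg over the last 24 h of records
    'packet_loss': 0.0,     # mean packet_loss_percent
    'avg_cpu_temp': 45.0,   # mean cpu_temperature
    'avg_gpu_temp': 51.0,   # mean gpu_temperature
}

(or {} when no stored record is newer than 24 hours, which analyze_data_with_llm has to tolerate). The `!= "OK"` check implies the prompt asks the model to answer the literal string OK when nothing looks anomalous.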
requirements.txt
@@ -2,5 +2,4 @@ ollama
 discord-webhook
 requests
 syslog-rfc5424-parser
-apachelogs
-jc
+apachelogs