Nmap... working?

This commit is contained in:
2025-08-20 12:51:11 -05:00
parent f6cbe1da8f
commit dd673829d2
6 changed files with 110 additions and 21 deletions

View File

@@ -49,16 +49,16 @@
## Network Scanning (Nmap Integration) ## Network Scanning (Nmap Integration)
1. [ ] Add `python-nmap` to `requirements.txt` and install. 1. [x] Add `python-nmap` to `requirements.txt` and install.
2. [ ] Define `NMAP_TARGETS` and `NMAP_SCAN_OPTIONS` in `config.py`. 2. [x] Define `NMAP_TARGETS` and `NMAP_SCAN_OPTIONS` in `config.py`.
3. [ ] Create a new function `get_nmap_scan_results()` in `monitor_agent.py`: 3. [x] Create a new function `get_nmap_scan_results()` in `monitor_agent.py`:
* [ ] Use `python-nmap` to perform a scan on the defined targets with the specified options. * [x] Use `python-nmap` to perform a scan on the defined targets with the specified options.
* [ ] Return the parsed results. * [x] Return the parsed results.
4. [ ] Integrate `get_nmap_scan_results()` into the main monitoring loop: 4. [x] Integrate `get_nmap_scan_results()` into the main monitoring loop:
* [ ] Call this function periodically (e.g., less frequently than other metrics). * [x] Call this function periodically (e.g., less frequently than other metrics).
* [ ] Add the `nmap` results to the `combined_data` dictionary. * [x] Add the `nmap` results to the `combined_data` dictionary.
5. [ ] Update `data_storage.py` to store `nmap` results. 5. [x] Update `data_storage.py` to store `nmap` results.
6. [ ] Extend `calculate_baselines()` in `data_storage.py` to include `nmap` baselines: 6. [x] Extend `calculate_baselines()` in `data_storage.py` to include `nmap` baselines:
* [ ] Compare current `nmap` results with historical data to identify changes. * [x] Compare current `nmap` results with historical data to identify changes.
7. [ ] Modify `analyze_data_with_llm()` prompt to include `nmap` scan results for analysis. 7. [x] Modify `analyze_data_with_llm()` prompt to include `nmap` scan results for analysis.
8. [ ] Consider how to handle `nmap` permissions. 8. [x] Consider how to handle `nmap` permissions.

View File

@@ -65,6 +65,16 @@ The script will start a continuous monitoring loop. Every 5 minutes, it will:
The script will print its status and any detected anomalies to the console. The script will print its status and any detected anomalies to the console.
### Nmap Scans
The agent uses `nmap` to scan the network for open ports. By default, it uses a TCP SYN scan (`-sS`), which requires root privileges. If the script is not run as root, it will fall back to a TCP connect scan (`-sT`), which does not require root privileges but is slower and more likely to be detected.
To run the agent with root privileges, use the `sudo` command:
```bash
sudo python monitor_agent.py
```
## 4. Features ## 4. Features
### Priority System ### Priority System

View File

@@ -11,5 +11,9 @@ GOOGLE_HOME_SPEAKER_ID = "media_player.spencer_room_speaker"
# Daily Recap Time (in 24-hour format, e.g., "20:00") # Daily Recap Time (in 24-hour format, e.g., "20:00")
DAILY_RECAP_TIME = "20:00" DAILY_RECAP_TIME = "20:00"
# Nmap Configuration
NMAP_TARGETS = "192.168.1.0/24"
NMAP_SCAN_OPTIONS = "-sS -T4"
# Test Mode (True to run once and exit, False to run continuously) # Test Mode (True to run once and exit, False to run continuously)
TEST_MODE = False TEST_MODE = False

View File

@@ -35,4 +35,22 @@ def calculate_baselines():
'avg_gpu_temp': sum(d['gpu_temperature']['gpu_temperature'] for d in recent_data if d['gpu_temperature']['gpu_temperature'] != "N/A") / len(recent_data), 'avg_gpu_temp': sum(d['gpu_temperature']['gpu_temperature'] for d in recent_data if d['gpu_temperature']['gpu_temperature'] != "N/A") / len(recent_data),
} }
# Baseline for open ports from nmap scans
host_ports = {}
for d in recent_data:
if 'nmap_results' in d and 'scan' in d['nmap_results']:
for host, scan_data in d['nmap_results']['scan'].items():
if host not in host_ports:
host_ports[host] = set()
if 'tcp' in scan_data:
for port, port_data in scan_data['tcp'].items():
if port_data['state'] == 'open':
host_ports[host].add(port)
# Convert sets to sorted lists for JSON serialization
for host, ports in host_ports.items():
host_ports[host] = sorted(list(ports))
baseline_metrics['host_ports'] = host_ports
return baseline_metrics return baseline_metrics

View File

@@ -11,6 +11,7 @@ import re
import os import os
from datetime import datetime, timezone from datetime import datetime, timezone
import pingparsing import pingparsing
import nmap
# Load configuration # Load configuration
import config import config
@@ -40,7 +41,7 @@ def get_system_logs():
parsed_logs = [] parsed_logs = []
for line in log_lines: for line in log_lines:
try: try:
parsed_logs.append(parser.parse(line).as_dict()) parsed_logs.append(parser.parse(line).as_dict()) # type: ignore
except Exception: except Exception:
# If parsing fails, just append the raw line # If parsing fails, just append the raw line
parsed_logs.append({"raw_log": line.strip()}) parsed_logs.append({"raw_log": line.strip()})
@@ -120,6 +121,21 @@ def get_login_attempts():
print(f"Error reading login attempts: {e}") print(f"Error reading login attempts: {e}")
return {"failed_logins": []} return {"failed_logins": []}
def get_nmap_scan_results():
    """Run an Nmap scan of the configured targets and return the raw results.

    Uses ``config.NMAP_TARGETS`` / ``config.NMAP_SCAN_OPTIONS``. When the
    process is not running as root, a requested SYN scan (``-sS``) is
    downgraded to a TCP connect scan (``-sT``), since SYN scans need
    raw-socket privileges.

    Returns:
        The python-nmap result dict from ``PortScanner.scan``, or a dict
        of the form ``{"error": ...}`` if the scan raised an exception.
    """
    try:
        scanner = nmap.PortScanner()
        options = config.NMAP_SCAN_OPTIONS
        # -sS requires raw sockets (root); fall back for unprivileged runs.
        if "-sS" in options and os.geteuid() != 0:
            print("Warning: Nmap -sS scan requires root privileges. Falling back to -sT.")
            options = options.replace("-sS", "-sT")
        return scanner.scan(hosts=config.NMAP_TARGETS, arguments=options)
    except Exception as e:
        print(f"Error performing Nmap scan: {e}")
        return {"error": "Nmap scan failed"}
# --- LLM Interaction Function --- # --- LLM Interaction Function ---
def analyze_data_with_llm(data, baselines): def analyze_data_with_llm(data, baselines):
@@ -130,10 +146,35 @@ def analyze_data_with_llm(data, baselines):
with open("known_issues.json", "r") as f: with open("known_issues.json", "r") as f:
known_issues = json.load(f) known_issues = json.load(f)
# Compare current nmap results with baseline
nmap_changes = {"new_hosts": [], "changed_ports": {}}
if "nmap_results" in data and "host_ports" in baselines:
current_hosts = set(data["nmap_results"].get("scan", {}).keys())
baseline_hosts = set(baselines["host_ports"].keys())
# New hosts
nmap_changes["new_hosts"] = sorted(list(current_hosts - baseline_hosts))
# Changed ports on existing hosts
for host in current_hosts.intersection(baseline_hosts):
current_ports = set()
if "tcp" in data["nmap_results"]["scan"][host]:
for port, port_data in data["nmap_results"]["scan"][host]["tcp"].items():
if port_data["state"] == "open":
current_ports.add(port)
baseline_ports = set(baselines["host_ports"].get(host, []))
newly_opened = sorted(list(current_ports - baseline_ports))
newly_closed = sorted(list(baseline_ports - current_ports))
if newly_opened or newly_closed:
nmap_changes["changed_ports"][host] = {"opened": newly_opened, "closed": newly_closed}
prompt = f""" prompt = f"""
**Role:** You are a dedicated and expert system administrator. Your primary role is to identify anomalies and provide concise, actionable reports. **Role:** You are a dedicated and expert system administrator. Your primary role is to identify anomalies and provide concise, actionable reports.
**Instruction:** Analyze the following system and network data for any activity that appears out of place or different. Consider unusual values, errors, or unexpected patterns as anomalies. Compare the current data with the historical baseline data to identify significant deviations. Consult the known issues feed to avoid flagging resolved or expected issues. **Instruction:** Analyze the following system and network data for any activity that appears out of place or different. Consider unusual values, errors, or unexpected patterns as anomalies. Compare the current data with the historical baseline data to identify significant deviations. Consult the known issues feed to avoid flagging resolved or expected issues. Pay special attention to the Nmap scan results for any new or unexpected open ports.
**Context:** **Context:**
Here is the system data in JSON format for your analysis: {json.dumps(data, indent=2)} Here is the system data in JSON format for your analysis: {json.dumps(data, indent=2)}
@@ -141,6 +182,9 @@ def analyze_data_with_llm(data, baselines):
**Historical Baseline Data:** **Historical Baseline Data:**
{json.dumps(baselines, indent=2)} {json.dumps(baselines, indent=2)}
**Nmap Scan Changes:**
{json.dumps(nmap_changes, indent=2)}
**Known Issues Feed:** **Known Issues Feed:**
{json.dumps(known_issues, indent=2)} {json.dumps(known_issues, indent=2)}
@@ -171,11 +215,11 @@ def analyze_data_with_llm(data, baselines):
except json.JSONDecodeError as e: except json.JSONDecodeError as e:
print(f"Error decoding LLM response: {e}") print(f"Error decoding LLM response: {e}")
# Fallback for invalid JSON # Fallback for invalid JSON
return {"severity": "low", "reason": response['response'].strip()} return {{"severity": "low", "reason": response['response'].strip()}} # type: ignore
else: else:
# Handle cases where the response is not valid JSON # Handle cases where the response is not valid JSON
print(f"LLM returned a non-JSON response: {sanitized_response}") print(f"LLM returned a non-JSON response: {sanitized_response}")
return {"severity": "low", "reason": sanitized_response} return {{"severity": "low", "reason": sanitized_response}} # type: ignore
except Exception as e: except Exception as e:
print(f"Error interacting with LLM: {e}") print(f"Error interacting with LLM: {e}")
return None return None
@@ -236,6 +280,7 @@ if __name__ == "__main__":
cpu_temp = get_cpu_temperature() cpu_temp = get_cpu_temperature()
gpu_temp = get_gpu_temperature() gpu_temp = get_gpu_temperature()
login_attempts = get_login_attempts() login_attempts = get_login_attempts()
nmap_results = get_nmap_scan_results()
if system_logs and network_metrics: if system_logs and network_metrics:
combined_data = { combined_data = {
@@ -244,7 +289,8 @@ if __name__ == "__main__":
"network_metrics": network_metrics, "network_metrics": network_metrics,
"cpu_temperature": cpu_temp, "cpu_temperature": cpu_temp,
"gpu_temperature": gpu_temp, "gpu_temperature": gpu_temp,
"login_attempts": login_attempts "login_attempts": login_attempts,
"nmap_results": nmap_results
} }
data_storage.store_data(combined_data) data_storage.store_data(combined_data)
@@ -258,6 +304,7 @@ if __name__ == "__main__":
else: else:
print("No anomaly detected.") print("No anomaly detected.")
else: else:
nmap_scan_counter = 0
while True: while True:
print("Running monitoring cycle...") print("Running monitoring cycle...")
system_logs = get_system_logs() system_logs = get_system_logs()
@@ -266,6 +313,12 @@ if __name__ == "__main__":
gpu_temp = get_gpu_temperature() gpu_temp = get_gpu_temperature()
login_attempts = get_login_attempts() login_attempts = get_login_attempts()
nmap_results = None
if nmap_scan_counter == 0:
nmap_results = get_nmap_scan_results()
nmap_scan_counter = (nmap_scan_counter + 1) % 4 # Run nmap scan every 4th cycle (20 minutes)
if system_logs and network_metrics: if system_logs and network_metrics:
combined_data = { combined_data = {
"timestamp": datetime.now(timezone.utc).isoformat(), "timestamp": datetime.now(timezone.utc).isoformat(),
@@ -276,6 +329,9 @@ if __name__ == "__main__":
"login_attempts": login_attempts "login_attempts": login_attempts
} }
if nmap_results:
combined_data["nmap_results"] = nmap_results
data_storage.store_data(combined_data) data_storage.store_data(combined_data)
llm_response = analyze_data_with_llm(combined_data, data_storage.calculate_baselines()) llm_response = analyze_data_with_llm(combined_data, data_storage.calculate_baselines())
@@ -296,3 +352,4 @@ if __name__ == "__main__":
time.sleep(300) # Run every 5 minutes time.sleep(300) # Run every 5 minutes

View File

@@ -1,6 +1,6 @@
ollama
discord-webhook discord-webhook
requests requests
ollama
syslog-rfc5424-parser syslog-rfc5424-parser
apachelogs
pingparsing pingparsing
python-nmap