Offloaded data detection from the LLM and hardcoded it

This commit is contained in:
2025-08-24 13:30:21 -05:00
parent 6f7e99639c
commit d102dc30f4
2 changed files with 13 additions and 5 deletions

View File

@@ -76,8 +76,6 @@
- [x] When calculating averages, please round up to the nearest integer. We only want to deliver whole integers to the LLM to process, and nothing with decimal points. It gets confused with decimal points.
- [x] In the discord message, please include the exact specific details and the log of the problem that prompted the alert
## TODO
## Phase 7: Offloading Analysis from LLM
39. [x] Create a new function `analyze_data_locally` in `monitor_agent.py`.
@@ -93,4 +91,6 @@
41.1. [x] Call `analyze_data_locally` to get the list of anomalies.
41.2. [x] If anomalies are found, call `generate_llm_report` to create the report.
41.3. [x] Use the output of `generate_llm_report` for alerting.
42. [x] Remove the detailed analytical instructions from `build_llm_prompt` as they will be handled by `analyze_data_locally`.
## TODO
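
The checklist above moves anomaly detection out of the LLM prompt and into a local helper, with averages rounded up to whole integers before anything is handed to the model. Below is a minimal sketch of what `analyze_data_locally` could look like under those constraints; the function name and call signature come from this commit, but the data shapes, the 1.5× threshold, and the returned fields are hypothetical.

```python
import math

def analyze_data_locally(combined_data, baselines, known_issues, port_applications):
    """Hypothetical sketch: detect anomalies locally so the LLM only summarizes them.

    Assumed shapes: combined_data maps metric -> list of samples, baselines maps
    metric -> number. port_applications is accepted to match the real signature
    but unused in this sketch.
    """
    anomalies = []
    for metric, samples in combined_data.items():
        if not samples or metric in known_issues:
            continue
        # Per the checklist: round the average UP to the nearest whole integer
        # so the LLM never sees decimal points.
        avg = math.ceil(sum(samples) / len(samples))
        baseline = baselines.get(metric)
        # Hypothetical threshold: flag anything more than 50% above its baseline.
        if baseline is not None and avg > baseline * 1.5:
            anomalies.append({
                "metric": metric,
                "average": avg,
                "baseline": baseline,
                "detail": f"{metric} averaged {avg} against a baseline of {baseline}",
            })
    return anomalies
```

For example, samples of 80, 85, 85 average 83.33, which `math.ceil` rounds up to 84; against a hypothetical baseline of 50 and the 1.5× threshold (75), that sample set would be flagged.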

View File

@@ -284,6 +284,7 @@ def build_llm_prompt(anomalies):
def generate_llm_report(anomalies):
    """Generates a report from a list of anomalies using the local LLM."""
    logger.info("Generating LLM report...")
    if not anomalies:
        return {"severity": "none", "reason": ""}
@@ -322,7 +323,13 @@ def generate_llm_report(anomalies):
def send_discord_alert(llm_response, combined_data):
    """Sends an alert to Discord."""
    reason = llm_response.get('reason', 'No reason provided.')
message = f"**High Severity Alert:**\n> {reason}\n\n**Relevant Data:**\n```json\n{json.dumps(combined_data, indent=2)}\n```" message = f"""**High Severity Alert:**
> {reason}
**Relevant Data:**
```json
{json.dumps(combined_data, indent=2)}
```"""
    webhook = DiscordWebhook(url=config.DISCORD_WEBHOOK_URL, content=message)
    try:
        response = webhook.execute()
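
The change above swaps the single-line f-string for a triple-quoted one, so the alert arrives in Discord as a readable Markdown block with the triggering data fenced as JSON, which is what the checklist item about including the exact details asks for. Here is a standalone sketch of that path using the same `discord-webhook` package the diff already imports; the webhook URL, sample payload, and the parameterized signature (taking the URL instead of reading `config.DISCORD_WEBHOOK_URL`) are placeholders.

````python
import json
from discord_webhook import DiscordWebhook

def send_discord_alert(llm_response, combined_data, webhook_url):
    """Standalone sketch; the real code reads the URL from config and logs failures."""
    reason = llm_response.get('reason', 'No reason provided.')
    # Triple-quoted f-string: Discord renders the ```json fence as a code block,
    # so the exact data that triggered the alert appears verbatim in the message.
    message = f"""**High Severity Alert:**
> {reason}

**Relevant Data:**
```json
{json.dumps(combined_data, indent=2)}
```"""
    webhook = DiscordWebhook(url=webhook_url, content=message)
    return webhook.execute()  # sends the POST; the real code wraps this in try/except
````

Worth noting: Discord caps message content at 2,000 characters, so a large `combined_data` payload may need trimming before it is embedded.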
@@ -430,6 +437,7 @@ def run_monitoring_cycle(nmap_scan_counter):
    anomalies = analyze_data_locally(combined_data, baselines, known_issues, port_applications)
    if anomalies:
        logger.info(f"Detected {len(anomalies)} anomalies: {anomalies}")
        llm_response = generate_llm_report(anomalies)
        if llm_response and llm_response.get('severity') != "none":
            daily_events.append(llm_response.get('reason'))
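
Inside `run_monitoring_cycle`, the order is now: local analysis, a log line recording what was found, then the LLM report, and alerting only when the reported severity is not "none". The sketch below condenses that flow; the early return for a quiet cycle and the `send_discord_alert` wiring are assumptions beyond what this hunk shows.

```python
def monitoring_cycle_sketch(combined_data, baselines, known_issues,
                            port_applications, daily_events):
    """Hypothetical condensation of the flow shown in run_monitoring_cycle."""
    # 1. Detection happens locally; the LLM is not involved at this step.
    anomalies = analyze_data_locally(combined_data, baselines,
                                     known_issues, port_applications)
    if not anomalies:
        return  # quiet cycle: no LLM call, no alert (assumed early exit)

    logger.info(f"Detected {len(anomalies)} anomalies: {anomalies}")

    # 2. The LLM only summarizes and rates the pre-detected anomalies.
    llm_response = generate_llm_report(anomalies)

    # 3. Record the event and alert only when severity is above "none".
    if llm_response and llm_response.get('severity') != "none":
        daily_events.append(llm_response.get('reason'))
        send_discord_alert(llm_response, combined_data)  # assumed wiring; not shown in this hunk
```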
@@ -452,4 +460,4 @@ def main():
        time.sleep(300) # Run every 5 minutes

if __name__ == "__main__":
    main()