Converted Responses to JSON, improved severity detection, and built a known issues feed

2025-08-20 12:06:11 -05:00
parent 7eaff1d08c
commit 0169483738
6 changed files with 76 additions and 16 deletions
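The diff reads a new known_issues.json file but never shows its layout. As a rough sketch only, the feed could be a flat list of objects along these lines; the field names (id, description, status) are assumptions for illustration and are not taken from the repository.

import json

# Hypothetical known_issues.json entries; every field name here is illustrative only.
sample_known_issues = [
    {
        "id": "nvme0-temp",
        "description": "NVMe drive runs warm under sustained writes; vendor confirms this is expected.",
        "status": "expected",
    },
    {
        "id": "wifi-retry-spikes",
        "description": "Intermittent Wi-Fi retry spikes on the guest network; resolved by a firmware update.",
        "status": "resolved",
    },
]

with open("known_issues.json", "w") as f:
    json.dump(sample_known_issues, f, indent=2)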


@@ -127,10 +127,13 @@ def analyze_data_with_llm(data, baselines):
with open("CONSTRAINTS.md", "r") as f:
constraints = f.read()
with open("known_issues.json", "r") as f:
known_issues = json.load(f)
prompt = f"""
**Role:** You are a dedicated and expert system administrator. Your primary role is to identify anomalies and provide concise, actionable reports.
**Instruction:** Analyze the following system and network data for any activity that appears out of place or different. Consider unusual values, errors, or unexpected patterns as anomalies. Compare the current data with the historical baseline data to identify significant deviations.
**Instruction:** Analyze the following system and network data for any activity that appears out of place or different. Consider unusual values, errors, or unexpected patterns as anomalies. Compare the current data with the historical baseline data to identify significant deviations. Consult the known issues feed to avoid flagging resolved or expected issues.
**Context:**
Here is the system data in JSON format for your analysis: {json.dumps(data, indent=2)}
@@ -138,16 +141,41 @@ def analyze_data_with_llm(data, baselines):
**Historical Baseline Data:**
{json.dumps(baselines, indent=2)}
+**Known Issues Feed:**
+{json.dumps(known_issues, indent=2)}
**Constraints and Guidelines:**
{constraints}
-**Output Request:** If you find an anomaly, provide a report as a single, coherent, natural language paragraph. The report must clearly state the anomaly, its potential cause, and its severity (e.g., high, medium, low). If no anomaly is found, respond with "OK".
+**Output Request:** If you find an anomaly, provide a report as a single JSON object with two keys: "severity" and "reason". The "severity" must be one of "high", "medium", "low", or "none". The "reason" must be a natural language explanation of the anomaly. If no anomaly is found, return a single JSON object with "severity" set to "none" and "reason" as an empty string. Do not wrap the JSON in markdown or any other formatting.
**Reasoning Hint:** Think step by step to come to your conclusion. This is very important.
"""
    try:
        response = ollama.generate(model="llama3.1:8b", prompt=prompt)
-        return response['response'].strip()
+        # Sanitize the response to ensure it's valid JSON
+        sanitized_response = response['response'].strip()
+        # Find the first '{' and the last '}' to extract the JSON object
+        start_index = sanitized_response.find('{')
+        end_index = sanitized_response.rfind('}')
+        if start_index != -1 and end_index != -1:
+            json_string = sanitized_response[start_index:end_index+1]
+            try:
+                return json.loads(json_string)
+            except json.JSONDecodeError:
+                # If parsing a single object fails, try parsing as a list;
+                # the '{'...'}' slice strips the surrounding brackets, so re-wrap them.
+                try:
+                    json_list = json.loads(f"[{json_string}]")
+                    if isinstance(json_list, list) and json_list:
+                        return json_list[0]  # Return the first object in the list
+                except json.JSONDecodeError as e:
+                    print(f"Error decoding LLM response: {e}")
+                # Fallback for invalid JSON
+                return {"severity": "low", "reason": sanitized_response}
+        else:
+            # Handle cases where the response is not valid JSON
+            print(f"LLM returned a non-JSON response: {sanitized_response}")
+            return {"severity": "low", "reason": sanitized_response}
    except Exception as e:
        print(f"Error interacting with LLM: {e}")
        return None
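For reference, a minimal standalone sketch (not part of this commit) of how the extraction added above behaves: it assumes the same first-'{' / last-'}' slicing and a response that follows the new severity/reason contract.

import json

# Illustrative raw model outputs the sanitizer has to cope with.
raw_outputs = [
    'Report: {"severity": "high", "reason": "CPU temperature is 40% above the baseline."}',
    '{"severity": "none", "reason": ""}',
]

for raw in raw_outputs:
    text = raw.strip()
    start, end = text.find('{'), text.rfind('}')
    if start != -1 and end != -1:
        report = json.loads(text[start:end + 1])  # same slicing as analyze_data_with_llm
        print(report["severity"], "-", report["reason"] or "no anomaly")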
@@ -222,11 +250,11 @@ if __name__ == "__main__":
        llm_response = analyze_data_with_llm(combined_data, data_storage.calculate_baselines())
-        if llm_response and llm_response != "OK":
-            print(f"Anomaly detected: {llm_response}")
-            if "high" in llm_response.lower():
-                send_discord_alert(llm_response)
-                send_google_home_alert(llm_response)
+        if llm_response and llm_response.get('severity') != "none":
+            print(f"Anomaly detected: {llm_response.get('reason')}")
+            if llm_response.get('severity') == "high":
+                send_discord_alert(llm_response.get('reason'))
+                send_google_home_alert(llm_response.get('reason'))
        else:
            print("No anomaly detected.")
    else:
@@ -252,11 +280,11 @@ if __name__ == "__main__":
        llm_response = analyze_data_with_llm(combined_data, data_storage.calculate_baselines())
-        if llm_response and llm_response != "OK":
-            daily_events.append(llm_response)
-            if "high" in llm_response.lower():
-                send_discord_alert(llm_response)
-                send_google_home_alert(llm_response)
+        if llm_response and llm_response.get('severity') != "none":
+            daily_events.append(llm_response.get('reason'))
+            if llm_response.get('severity') == "high":
+                send_discord_alert(llm_response.get('reason'))
+                send_google_home_alert(llm_response.get('reason'))
        # Daily Recap Logic
        current_time = time.strftime("%H:%M")