Spaces: Runtime error
Commit: "Update Codriaoagix.py" — Codriaoagix.py (+42 −12)
Codriaoagix.py — CHANGED
|
@@ -12,8 +12,9 @@ from datetime import datetime, timedelta
|
|
| 12 |
import blockchain_module
|
| 13 |
import speech_recognition as sr
|
| 14 |
import pyttsx3
|
| 15 |
-
|
| 16 |
|
|
|
|
| 17 |
from components.agix_reflection import SelfReflectiveAI
|
| 18 |
from components.multi_agent import MultiAgentSystem
|
| 19 |
from components.ar_integration import ARDataOverlay
|
|
@@ -22,12 +23,7 @@ from components.federated_learning import FederatedAI
|
|
| 22 |
from utils.database import Database
|
| 23 |
from utils.logger import logger
|
| 24 |
from secure_memory import SecureMemorySession
|
| 25 |
-
import
|
| 26 |
-
from cryptography.fernet import Fernet
|
| 27 |
-
|
| 28 |
-
key = os.environ.get("CODRIAO_SECRET_KEY").encode()
|
| 29 |
-
self._encryption_key = key
|
| 30 |
-
self.secure_memory = SecureMemorySession(self._encryption_key)
|
| 31 |
|
| 32 |
class AICoreAGIX:
|
| 33 |
def __init__(self, config_path: str = "config.json"):
|
|
@@ -44,10 +40,14 @@ class AICoreAGIX:
|
|
| 44 |
self.ar_overlay = ARDataOverlay()
|
| 45 |
self.neural_symbolic_processor = NeuralSymbolicProcessor()
|
| 46 |
self.federated_ai = FederatedAI()
|
| 47 |
-
|
| 48 |
-
|
|
|
|
|
|
|
| 49 |
self.secure_memory = SecureMemorySession(self._encryption_key)
|
|
|
|
| 50 |
self.speech_engine = pyttsx3.init()
|
|
|
|
| 51 |
|
| 52 |
async def generate_response(self, query: str, user_id: int) -> Dict[str, Any]:
|
| 53 |
try:
|
|
@@ -58,14 +58,26 @@ class AICoreAGIX:
|
|
| 58 |
if result["status"] == "flagged":
|
| 59 |
logger.warning(result["warning"])
|
| 60 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
# Vectorize and encrypt
|
| 62 |
vectorized_query = self._vectorize_query(query)
|
| 63 |
self.secure_memory.encrypt_vector(user_id, vectorized_query)
|
| 64 |
|
| 65 |
-
# (Optional) retrieve memory
|
| 66 |
user_vectors = self.secure_memory.decrypt_vectors(user_id)
|
| 67 |
|
| 68 |
-
# Main
|
| 69 |
model_response = await self._generate_local_model_response(query)
|
| 70 |
agent_response = self.multi_agent_system.delegate_task(query)
|
| 71 |
self_reflection = self.self_reflective_ai.evaluate_response(query, model_response)
|
|
@@ -88,6 +100,24 @@ class AICoreAGIX:
|
|
| 88 |
logger.error(f"Response generation failed: {e}")
|
| 89 |
return {"error": "Processing failed - safety protocols engaged"}
|
| 90 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 91 |
def _load_config(self, config_path: str) -> dict:
|
| 92 |
with open(config_path, 'r') as file:
|
| 93 |
return json.load(file)
|
|
@@ -112,4 +142,4 @@ class AICoreAGIX:
|
|
| 112 |
|
| 113 |
def _speak_response(self, response: str):
|
| 114 |
self.speech_engine.say(response)
|
| 115 |
-
self.speech_engine.runAndWait()
|
|
|
|
| 12 |
import blockchain_module
|
| 13 |
import speech_recognition as sr
|
| 14 |
import pyttsx3
|
| 15 |
+
import os
|
| 16 |
|
| 17 |
+
from ethical_filter import EthicalFilter
|
| 18 |
from components.agix_reflection import SelfReflectiveAI
|
| 19 |
from components.multi_agent import MultiAgentSystem
|
| 20 |
from components.ar_integration import ARDataOverlay
|
|
|
|
| 23 |
from utils.database import Database
|
| 24 |
from utils.logger import logger
|
| 25 |
from secure_memory import SecureMemorySession
|
| 26 |
+
from codriao_tb_module import CodriaoHealthModule
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
|
| 28 |
class AICoreAGIX:
|
| 29 |
def __init__(self, config_path: str = "config.json"):
|
|
|
|
| 40 |
self.ar_overlay = ARDataOverlay()
|
| 41 |
self.neural_symbolic_processor = NeuralSymbolicProcessor()
|
| 42 |
self.federated_ai = FederatedAI()
|
| 43 |
+
|
| 44 |
+
# Security + Memory
|
| 45 |
+
key = os.environ.get("CODRIAO_SECRET_KEY").encode()
|
| 46 |
+
self._encryption_key = key
|
| 47 |
self.secure_memory = SecureMemorySession(self._encryption_key)
|
| 48 |
+
|
| 49 |
self.speech_engine = pyttsx3.init()
|
| 50 |
+
self.health_module = CodriaoHealthModule(ai_core=self)
|
| 51 |
|
| 52 |
async def generate_response(self, query: str, user_id: int) -> Dict[str, Any]:
|
| 53 |
try:
|
|
|
|
| 58 |
if result["status"] == "flagged":
|
| 59 |
logger.warning(result["warning"])
|
| 60 |
|
| 61 |
+
# Check if user explicitly requests TB analysis
|
| 62 |
+
if any(phrase in query.lower() for phrase in ["tb check", "analyze my tb", "run tb diagnostics", "tb test"]):
|
| 63 |
+
result = await self.run_tb_diagnostics("tb_image.jpg", "tb_cough.wav", user_id)
|
| 64 |
+
return {
|
| 65 |
+
"response": result["ethical_analysis"],
|
| 66 |
+
"explanation": result["explanation"],
|
| 67 |
+
"tb_risk": result["tb_risk"],
|
| 68 |
+
"image_analysis": result["image_analysis"],
|
| 69 |
+
"audio_analysis": result["audio_analysis"],
|
| 70 |
+
"system_health": result["system_health"]
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
# Vectorize and encrypt
|
| 74 |
vectorized_query = self._vectorize_query(query)
|
| 75 |
self.secure_memory.encrypt_vector(user_id, vectorized_query)
|
| 76 |
|
| 77 |
+
# (Optional) retrieve memory
|
| 78 |
user_vectors = self.secure_memory.decrypt_vectors(user_id)
|
| 79 |
|
| 80 |
+
# Main AI processing
|
| 81 |
model_response = await self._generate_local_model_response(query)
|
| 82 |
agent_response = self.multi_agent_system.delegate_task(query)
|
| 83 |
self_reflection = self.self_reflective_ai.evaluate_response(query, model_response)
|
|
|
|
| 100 |
logger.error(f"Response generation failed: {e}")
|
| 101 |
return {"error": "Processing failed - safety protocols engaged"}
|
| 102 |
|
async def run_tb_diagnostics(self, image_path: str, audio_path: str, user_id: int) -> Dict[str, Any]:
    """Run the TB health module's risk evaluation for one user.

    Callers are expected to gate this on an explicit user request (see the
    phrase check in ``generate_response``); this method itself performs no
    such check, so the docstring no longer claims otherwise.

    Args:
        image_path: Path to the image file to analyze.
        audio_path: Path to the cough-audio file to analyze.
        user_id: Identifier of the requesting user.

    Returns:
        The health module's result dict on success; on any failure, a dict
        with ``tb_risk == "ERROR"`` and the same keys so callers always get
        a uniform shape. This method never raises.
    """
    try:
        result = await self.health_module.evaluate_tb_risk(image_path, audio_path, user_id)
        # Lazy %-formatting: the message is only built if INFO is enabled.
        # NOTE(review): this logs a medical result — confirm it is acceptable
        # under the deployment's privacy requirements.
        logger.info("TB Diagnostic Result: %s", result)
        return result
    except Exception as e:
        # logger.exception records the traceback, not just the message,
        # which makes diagnostics failures debuggable from the logs.
        logger.exception(f"TB diagnostics failed: {e}")
        return {
            "tb_risk": "ERROR",
            "error": str(e),
            "image_analysis": {},
            "audio_analysis": {},
            "ethical_analysis": "Unable to complete TB diagnostic.",
            "explanation": None,
            "system_health": None,
        }
| 121 |
def _load_config(self, config_path: str) -> dict:
|
| 122 |
with open(config_path, 'r') as file:
|
| 123 |
return json.load(file)
|
|
|
|
| 142 |
|
def _speak_response(self, response: str):
    """Vocalize *response* through the TTS engine; blocks until speech ends."""
    engine = self.speech_engine
    engine.say(response)
    engine.runAndWait()