#!/bin/bash
# Atlas Intelligence Universal Setup Script
# Compatible with macOS on Apple Silicon (M1/M2/M3)
# Created: April 16, 2025

# Propagate pipeline failures; individual errors are handled per step rather than with `set -e`
set -o pipefail
| # Color codes for better output | |
| GREEN='\033[0;32m' | |
| BLUE='\033[0;34m' | |
| YELLOW='\033[0;33m' | |
| RED='\033[0;31m' | |
| BOLD='\033[1m' | |
| NC='\033[0m' # No Color | |
| # Paths to project directories | |
| OPENMANUS_DIR="$HOME/OpenManus" | |
| CASIBASE_DIR="$HOME/casibase" | |
| CYPHER_DIR="$HOME/Cypher" | |
| CLOUD_DIR="$HOME/Library/Mobile Documents/com~apple~CloudDocs/Atlas Business" | |
| AI_COMPANION_DIR="$CLOUD_DIR/AIConversationCompanion" | |
| QUANTUM_VISION_DIR="$CLOUD_DIR/QuantumVision" | |
| UNIFIED_DIR="$HOME/AtlasUnified" | |
| # Log file for the installation process | |
| LOG_FILE="$QUANTUM_VISION_DIR/install_log.txt" | |
| # Function to log messages to both stdout and log file | |
| log() { | |
| echo -e "$1" | tee -a "$LOG_FILE" | |
| } | |
| # Function to log errors | |
| log_error() { | |
| echo -e "${RED}ERROR: $1${NC}" | tee -a "$LOG_FILE" | |
| } | |
| # Function to log warnings | |
| log_warning() { | |
| echo -e "${YELLOW}WARNING: $1${NC}" | tee -a "$LOG_FILE" | |
| } | |
| # Function to log success | |
| log_success() { | |
| echo -e "${GREEN}$1${NC}" | tee -a "$LOG_FILE" | |
| } | |
| # Function to log information | |
| log_info() { | |
| echo -e "${BLUE}$1${NC}" | tee -a "$LOG_FILE" | |
| } | |
# Clean log file (make sure its directory exists first)
mkdir -p "$(dirname "$LOG_FILE")"
> "$LOG_FILE"
| log_info "==============================================" | |
| log_info " Atlas Intelligence Unified Setup " | |
| log_info "==============================================" | |
| log_info "This script will set up a universal connection between:" | |
| log "- OpenManus AI Agent Framework" | |
| log "- Casibase RAG Knowledge Database" | |
| log "- Cypher" | |
| log "- AI Conversation Companion" | |
| log "- Quantum Vision" | |
| log_info "==============================================" | |
| log "" | |
| # Check if running on macOS | |
| if [[ "$(uname)" != "Darwin" ]]; then | |
| log_error "This script is designed for macOS. Exiting." | |
| exit 1 | |
| fi | |
| # Check for Apple Silicon | |
| if [[ "$(uname -m)" != "arm64" ]]; then | |
| log_warning "You're not running on Apple Silicon (M1/M2/M3). Some optimizations may not work." | |
| fi | |
| # Check for required tools | |
| log_info "Checking for required tools..." | |
| # Check for Homebrew | |
| if ! command -v brew &> /dev/null; then | |
| log_warning "Homebrew not found. Installing Homebrew..." | |
| /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" | |
| # Add Homebrew to PATH for Apple Silicon | |
| if [[ "$(uname -m)" == "arm64" ]]; then | |
| echo 'eval "$(/opt/homebrew/bin/brew shellenv)"' >> ~/.zprofile | |
| eval "$(/opt/homebrew/bin/brew shellenv)" | |
| fi | |
| else | |
| log_success "Homebrew is installed." | |
| fi | |
# Check for Python
if ! command -v python3 &> /dev/null; then
    log_warning "Python not found. Installing Python..."
    brew install python3
else
    log_success "Python is installed."
    PYTHON_VERSION=$(python3 --version | cut -d' ' -f2)
    log_info "Python version: $PYTHON_VERSION"
fi
# Check for Go
if ! command -v go &> /dev/null; then
    log_warning "Go not found. Installing Go..."
    brew install go
else
    log_success "Go is installed."
    GO_VERSION=$(go version | cut -d' ' -f3)
    log_info "Go version: $GO_VERSION"
fi

# Check for Node.js
if ! command -v node &> /dev/null; then
    log_warning "Node.js not found. Installing Node.js..."
    brew install node
else
    log_success "Node.js is installed."
    NODE_VERSION=$(node --version)
    log_info "Node.js version: $NODE_VERSION"
fi
# Check for required Python modules
check_python_module() {
    if ! python3 -c "import $1" &> /dev/null; then
        return 1
    else
        return 0
    fi
}
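# Example usage (this helper is not invoked elsewhere in this script):
#   check_python_module numpy || pip install numpy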
# Create unified directory if it doesn't exist
log_info "Setting up unified directory structure..."
mkdir -p "$UNIFIED_DIR"
mkdir -p "$UNIFIED_DIR/config"
mkdir -p "$UNIFIED_DIR/logs"
mkdir -p "$UNIFIED_DIR/data"

# Set up Python virtual environment with error handling
log_info "Setting up Python virtual environment..."
if ! python3 -c "import venv" &> /dev/null; then
    log_warning "Python venv module not found. Reinstalling Python..."
    brew reinstall python3
fi
# Create and activate virtual environment
python3 -m venv "$UNIFIED_DIR/venv" || {
    log_error "Failed to create virtual environment. Trying alternative approach..."
    pip3 install virtualenv
    virtualenv "$UNIFIED_DIR/venv"
}

# Source the virtual environment with error handling
if [ -f "$UNIFIED_DIR/venv/bin/activate" ]; then
    source "$UNIFIED_DIR/venv/bin/activate"
    log_success "Virtual environment activated."
else
    log_error "Virtual environment activation script not found. Installation may not work correctly."
    exit 1
fi

# Upgrade pip and install wheel to avoid common installation issues
log_info "Upgrading pip and installing wheel..."
pip install --upgrade pip wheel setuptools

# Install core dependencies that commonly cause issues if not installed first
log_info "Installing core dependencies..."
pip install Cython numpy==1.26.4 pandas scikit-learn

# OpenManus dependencies
log_info "Installing OpenManus dependencies..."
if [ -f "$OPENMANUS_DIR/requirements.txt" ]; then
    # Try-catch for OpenManus requirements
    if ! pip install -r "$OPENMANUS_DIR/requirements.txt"; then
        log_warning "Some OpenManus dependencies failed to install. Installing essential ones individually..."
        # Install critical dependencies individually
        pip install openai tenacity pyyaml loguru
    fi
else
    log_error "OpenManus requirements.txt not found. Skipping OpenManus dependencies."
fi

# Quantum Vision dependencies - FIX: instead of installing as a package, install dependencies directly
log_info "Installing Quantum Vision dependencies..."
if [ -f "$QUANTUM_VISION_DIR/pyproject.toml" ]; then
    # Install specific dependencies from pyproject.toml instead of the package itself
    log_info "Installing specific Quantum Vision dependencies..."
    pip install beautifulsoup4 email-validator flask flask-sqlalchemy gunicorn
    pip install "openai>=1.6.0" psycopg2-binary requests
    pip install "spacy>=3.0.0" sqlalchemy trafilatura
    # Create an empty __init__.py file in the Quantum Vision directory to make it importable
    touch "$QUANTUM_VISION_DIR/__init__.py"
    log_success "Quantum Vision dependencies installed."
else
    log_error "Quantum Vision pyproject.toml not found. Skipping Quantum Vision dependencies."
fi

# Cypher dependencies
log_info "Installing Cypher dependencies..."
if [ -f "$CYPHER_DIR/requirements.txt" ]; then
    # Try-catch for Cypher requirements
    if ! pip install -r "$CYPHER_DIR/requirements.txt"; then
        log_warning "Some Cypher dependencies failed to install. Installing essential ones individually..."
        pip install flask requests
    fi
else
    log_warning "Cypher requirements.txt not found. Skipping Cypher dependencies."
fi

# Install FastAPI and related packages for the unified API
log_info "Installing FastAPI and related packages for the unified API..."
pip install fastapi uvicorn pydantic pyyaml

# Set up Casibase
log_info "Setting up Casibase..."
if [ -d "$CASIBASE_DIR" ]; then
    cd "$CASIBASE_DIR"
    # Check if Docker is installed
    if ! command -v docker &> /dev/null; then
        log_warning "Docker not found. Installing Docker..."
        brew install --cask docker
        log_warning "Please open Docker Desktop and complete setup, then run this script again."
        log_warning "Skipping Casibase setup for now."
    else
        # Build and run Casibase
        log_info "Building Casibase..."
        # Only run build.sh if it exists and is executable
        if [ -f "./build.sh" ] && [ -x "./build.sh" ]; then
            # Run build.sh with error handling
            if ! ./build.sh; then
                log_warning "Casibase build failed. You may need to run it manually later."
            else
                log_success "Casibase build completed successfully."
            fi
        else
            log_warning "Casibase build.sh not found or not executable. Skipping build."
        fi
    fi
    log_success "Casibase setup completed with available components."
else
    log_warning "Casibase directory not found. Skipping Casibase setup."
fi

# Return to original directory
cd "$QUANTUM_VISION_DIR"

# Set up AI Conversation Companion frontend
log_info "Setting up AI Conversation Companion..."
if [ -d "$AI_COMPANION_DIR" ]; then
    cd "$AI_COMPANION_DIR"
    # Install npm dependencies
    if [ -f "package.json" ]; then
        log_info "Installing npm dependencies for AI Conversation Companion..."
        # Run npm install with error handling
        if ! npm install; then
            log_warning "Some npm packages failed to install. You may need to run 'npm install' manually later."
        fi
    else
        log_warning "package.json not found in AI Conversation Companion. Skipping npm install."
    fi
else
    log_warning "AI Conversation Companion directory not found. Skipping frontend setup."
fi

# Return to original directory
cd "$QUANTUM_VISION_DIR"

# Create unified configuration
log_info "Creating unified configuration..."
mkdir -p "$UNIFIED_DIR/config"
| cat > "$UNIFIED_DIR/config/unified_config.yaml" << EOF | |
| # Atlas Intelligence Unified Configuration | |
| version: 1.0.0 | |
| created: "$(date)" | |
| # Project Paths | |
| paths: | |
| openmanus: "$OPENMANUS_DIR" | |
| casibase: "$CASIBASE_DIR" | |
| cypher: "$CYPHER_DIR" | |
| ai_companion: "$AI_COMPANION_DIR" | |
| quantum_vision: "$QUANTUM_VISION_DIR" | |
| unified: "$UNIFIED_DIR" | |
| # Integration Settings | |
| integrations: | |
| enable_openmanus: true | |
| enable_casibase: true | |
| enable_cypher: true | |
| enable_ai_companion: true | |
| enable_quantum_vision: true | |
| # API Settings | |
| api: | |
| host: "localhost" | |
| port: 8080 | |
| debug: false | |
| enable_cors: true | |
| # Database Settings | |
| database: | |
| type: "sqlite" # For development, can be changed to mysql or postgresql | |
| path: "$UNIFIED_DIR/data/atlas_unified.db" | |
| # Logging Settings | |
| logging: | |
| level: "info" | |
| file: "$UNIFIED_DIR/logs/atlas_unified.log" | |
| max_size_mb: 10 | |
| backup_count: 5 | |
| # LLM Integration | |
| llm: | |
| provider: "openai" # Can be openai, anthropic, llama, etc. | |
| model: "gpt-4" | |
| api_key_env: "OPENAI_API_KEY" | |
| EOF | |
# Create the unified bridge script with better error handling and diagnostics
log_info "Creating unified bridge script..."
cat > "$UNIFIED_DIR/unified_bridge.py" << EOF
#!/usr/bin/env python3
"""
Atlas Intelligence Unified Bridge
This script provides a universal bridge between all Atlas Intelligence components.
"""
import os
import sys
import yaml
import logging
import traceback
from pathlib import Path
from typing import Dict, Any, Optional, Union

# Configure logging first
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler()
    ]
)
logger = logging.getLogger("atlas_unified")

# Setup paths
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
CONFIG_PATH = os.path.join(SCRIPT_DIR, "config", "unified_config.yaml")

# Load configuration with error handling
try:
    with open(CONFIG_PATH, "r") as f:
        config = yaml.safe_load(f)
    # Now that we have config, setup file logging
    log_file = config["logging"]["file"]
    os.makedirs(os.path.dirname(log_file), exist_ok=True)
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(file_handler)
    logger.setLevel(logging.getLevelName(config["logging"]["level"].upper()))
except Exception as e:
    logger.error(f"Failed to load configuration: {e}")
    logger.error(traceback.format_exc())
    sys.exit(1)

# Add project paths to Python path with validation
for key, path in config["paths"].items():
    if os.path.exists(path):
        sys.path.append(path)
        logger.info(f"Added {key} path to system path: {path}")
    else:
        logger.warning(f"Path {key} does not exist: {path}")

# Try to import FastAPI with error handling
try:
    from fastapi import FastAPI, Request, Response, status
    from fastapi.middleware.cors import CORSMiddleware
    import uvicorn
except ImportError as e:
    logger.error(f"Failed to import required packages: {e}")
    logger.error("Please ensure fastapi and uvicorn are installed.")
    logger.error("Run: pip install fastapi uvicorn")
    sys.exit(1)

# Initialize FastAPI app
app = FastAPI(title="Atlas Intelligence Unified API")

# Add CORS middleware
if config["api"]["enable_cors"]:
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

@app.get("/")
async def root():
    """Root endpoint returning welcome message"""
    return {
        "message": "Welcome to Atlas Intelligence Unified API",
        "version": config["version"],
        "status": "operational"
    }

@app.get("/health")
async def health():
    """Health check endpoint"""
    return {"status": "healthy"}
| @app.get("/status") | |
| async def status(): | |
| """Status endpoint with detailed component information""" | |
| components_status = {} | |
| # Check OpenManus status | |
| if config["integrations"]["enable_openmanus"]: | |
| components_status["openmanus"] = check_openmanus_status() | |
| else: | |
| components_status["openmanus"] = "disabled" | |
| # Check QuantumVision status | |
| if config["integrations"]["enable_quantum_vision"]: | |
| components_status["quantum_vision"] = check_quantum_vision_status() | |
| else: | |
| components_status["quantum_vision"] = "disabled" | |
| # Check Casibase status | |
| if config["integrations"]["enable_casibase"]: | |
| components_status["casibase"] = check_casibase_status() | |
| else: | |
| components_status["casibase"] = "disabled" | |
| # Check Cypher status | |
| if config["integrations"]["enable_cypher"]: | |
| components_status["cypher"] = check_cypher_status() | |
| else: | |
| components_status["cypher"] = "disabled" | |
| # Check AI Conversation Companion status | |
| if config["integrations"]["enable_ai_companion"]: | |
| components_status["ai_companion"] = check_ai_companion_status() | |
| else: | |
| components_status["ai_companion"] = "disabled" | |
| return { | |
| "status": "operational", | |
| "components": components_status, | |
| "version": config["version"] | |
| } | |
| def check_openmanus_status() -> str: | |
| """Check if OpenManus is available and working""" | |
| try: | |
| if not os.path.exists(config["paths"]["openmanus"]): | |
| return "unavailable (path not found)" | |
| # Try importing a key module | |
| sys.path.append(config["paths"]["openmanus"]) | |
| import app.llm as openmanus_llm | |
| return "available" | |
| except Exception as e: | |
| logger.error(f"Failed to check OpenManus: {e}") | |
| return f"error: {str(e)}" | |
| def check_quantum_vision_status() -> str: | |
| """Check if QuantumVision is available and working""" | |
| try: | |
| if not os.path.exists(config["paths"]["quantum_vision"]): | |
| return "unavailable (path not found)" | |
| # Try importing a key module | |
| sys.path.append(config["paths"]["quantum_vision"]) | |
| try: | |
| import nlp_processor | |
| return "available" | |
| except ImportError: | |
| # Create simple __init__.py in directory if it doesn't exist | |
| if not os.path.exists(f"{config['paths']['quantum_vision']}/__init__.py"): | |
| with open(f"{config['paths']['quantum_vision']}/__init__.py", "w") as f: | |
| f.write("# Generated by AtlasUnified installer\n") | |
| import nlp_processor | |
| return "available (fixed module imports)" | |
| return "error: could not import nlp_processor" | |
| except Exception as e: | |
| logger.error(f"Failed to check QuantumVision: {e}") | |
| return f"error: {str(e)}" | |
| def check_casibase_status() -> str: | |
| """Check if Casibase is available""" | |
| if os.path.exists(config["paths"]["casibase"]): | |
| return "path exists (service status unknown)" | |
| return "unavailable (path not found)" | |
| def check_cypher_status() -> str: | |
| """Check if Cypher is available""" | |
| if os.path.exists(config["paths"]["cypher"]): | |
| return "path exists (service status unknown)" | |
| return "unavailable (path not found)" | |
| def check_ai_companion_status() -> str: | |
| """Check if AI Conversation Companion is available""" | |
| if os.path.exists(config["paths"]["ai_companion"]): | |
| return "path exists (service status unknown)" | |
| return "unavailable (path not found)" | |
| @app.post("/query") | |
| async def query(request: Request, response: Response): | |
| """Process a query through available components""" | |
| try: | |
| data = await request.json() | |
| except Exception as e: | |
| response.status_code = status.HTTP_400_BAD_REQUEST | |
| return {"error": f"Invalid JSON: {str(e)}"} | |
| query_text = data.get("query", "") | |
| if not query_text: | |
| response.status_code = status.HTTP_400_BAD_REQUEST | |
| return {"error": "Query text is required"} | |
| logger.info(f"Received query: {query_text}") | |
| results = {} | |
| # Process with OpenManus if enabled | |
| if config["integrations"]["enable_openmanus"]: | |
| try: | |
| sys.path.append(config["paths"]["openmanus"]) | |
| try: | |
| from app.agent import Agent | |
| agent = Agent() | |
| openmanus_result = agent.run(query_text) | |
| results["openmanus"] = openmanus_result | |
| except Exception as inner_e: | |
| # Fallback to simpler interaction if full agent fails | |
| logger.warning(f"Failed to use OpenManus Agent, trying LLM directly: {inner_e}") | |
| from app import llm | |
| openmanus_result = llm.generate_text(prompt=query_text) | |
| results["openmanus"] = {"fallback_response": openmanus_result} | |
| except Exception as e: | |
| logger.error(f"OpenManus processing error: {e}") | |
| logger.error(traceback.format_exc()) | |
| results["openmanus"] = {"error": str(e)} | |
| # Process with Quantum Vision if enabled | |
| if config["integrations"]["enable_quantum_vision"]: | |
| try: | |
| sys.path.append(config["paths"]["quantum_vision"]) | |
| # Try to import and use nlp_processor | |
| try: | |
| import nlp_processor | |
| processor = nlp_processor.NLPProcessor() | |
| quantum_result = processor.process_text(query_text) | |
| results["quantum_vision"] = quantum_result | |
| except AttributeError: | |
| # Fallback to direct OpenAI if NLPProcessor class not found | |
| logger.warning("NLPProcessor not found, falling back to OpenAI integration") | |
| import openai_integration | |
| api_result = openai_integration.process_with_openai(query_text) | |
| results["quantum_vision"] = {"fallback_response": api_result} | |
| except Exception as e: | |
| logger.error(f"Quantum Vision processing error: {e}") | |
| logger.error(traceback.format_exc()) | |
| results["quantum_vision"] = {"error": str(e)} | |
| return results | |
| def main(): | |
| """Run the unified bridge server""" | |
| logger.info("Starting Atlas Intelligence Unified Bridge") | |
| try: | |
| uvicorn.run( | |
| "unified_bridge:app", | |
| host=config["api"]["host"], | |
| port=config["api"]["port"], | |
| reload=config["api"]["debug"] | |
| ) | |
| except Exception as e: | |
| logger.error(f"Failed to start server: {e}") | |
| logger.error(traceback.format_exc()) | |
| sys.exit(1) | |
| if __name__ == "__main__": | |
| main() | |
| EOF | |
# Make the bridge script executable
chmod +x "$UNIFIED_DIR/unified_bridge.py"

# Create a launch script with improved error handling
log_info "Creating launch script..."
| cat > "$UNIFIED_DIR/start_unified.sh" << EOF | |
| #!/bin/bash | |
| # Color codes for better output | |
| GREEN='\033[0;32m' | |
| BLUE='\033[0;34m' | |
| YELLOW='\033[0;33m' | |
| RED='\033[0;31m' | |
| NC='\033[0m' # No Color | |
| # Check if virtual environment exists | |
| if [ ! -f "$UNIFIED_DIR/venv/bin/activate" ]; then | |
| echo -e "${RED}Error: Virtual environment not found.${NC}" | |
| echo -e "${BLUE}Running installation repair...${NC}" | |
| # Attempt to fix the virtual environment | |
| cd "$UNIFIED_DIR" | |
| python3 -m venv venv | |
| if [ ! -f "$UNIFIED_DIR/venv/bin/activate" ]; then | |
| echo -e "${RED}Failed to create virtual environment. Please run the installation script again.${NC}" | |
| exit 1 | |
| fi | |
| fi | |
| # Activate the virtual environment | |
| source "$UNIFIED_DIR/venv/bin/activate" || { | |
| echo -e "${RED}Failed to activate virtual environment.${NC}" | |
| exit 1 | |
| } | |
| # Check if required packages are installed | |
| echo -e "${BLUE}Checking required packages...${NC}" | |
| python -c "import fastapi, uvicorn" &>/dev/null || { | |
| echo -e "${YELLOW}Some required packages are missing. Installing...${NC}" | |
| pip install fastapi uvicorn pydantic | |
| } | |
| # Change to the unified directory | |
| cd "$UNIFIED_DIR" || { | |
| echo -e "${RED}Failed to change to unified directory.${NC}" | |
| exit 1 | |
| } | |
| # Run the unified bridge | |
| echo -e "${GREEN}Starting Atlas Intelligence Unified API...${NC}" | |
| if [[ \$1 == "--debug" ]]; then | |
| echo -e "${YELLOW}Running in debug mode...${NC}" | |
| PYTHONPATH="\$PYTHONPATH:$UNIFIED_DIR" python unified_bridge.py | |
| else | |
| # Run in "production" mode with error logging | |
| PYTHONPATH="\$PYTHONPATH:$UNIFIED_DIR" python unified_bridge.py 2> "$UNIFIED_DIR/logs/error.log" | |
| fi | |
| EOF | |
| chmod +x "$UNIFIED_DIR/start_unified.sh" | |
# Create a simple diagnostic tool for troubleshooting
log_info "Creating diagnostic tool..."
cat > "$UNIFIED_DIR/diagnose.py" << EOF
#!/usr/bin/env python3
"""
Atlas Intelligence Diagnostic Tool
Checks if all components are properly configured and working.
"""
import os
import sys
import yaml
import importlib
import platform
import subprocess
from pathlib import Path

def print_header(text):
    print(f"\n{'=' * 50}")
    print(f" {text}")
    print(f"{'=' * 50}")

def print_success(text):
    print(f"[✓] {text}")

def print_error(text):
    print(f"[✗] {text}")

def print_warning(text):
    print(f"[!] {text}")

def check_python():
    print_header("Python Environment")
    print(f"Python version: {platform.python_version()}")
    print(f"Python executable: {sys.executable}")
    # Check virtual environment
    in_venv = sys.prefix != sys.base_prefix
    if in_venv:
        print_success(f"Running in virtual environment: {sys.prefix}")
    else:
        print_error("Not running in a virtual environment")

def check_core_packages():
    print_header("Core Python Packages")
    packages = [
        "fastapi", "uvicorn", "pydantic", "yaml", "numpy",
        "openai", "requests", "flask", "sqlalchemy"
    ]
    for package in packages:
        try:
            pkg = importlib.import_module(package)
            version = getattr(pkg, "__version__", "unknown version")
            print_success(f"{package}: {version}")
        except ImportError:
            print_error(f"{package}: Not installed")

def check_components(config):
    print_header("Component Status")
    # Check OpenManus
    if config["integrations"]["enable_openmanus"]:
        path = config["paths"]["openmanus"]
        if os.path.exists(path):
            print_success(f"OpenManus path exists: {path}")
            # Try to import a key module
            try:
                sys.path.append(path)
                import app.llm
                print_success("OpenManus modules can be imported")
            except ImportError as e:
                print_error(f"Cannot import OpenManus modules: {e}")
        else:
            print_error(f"OpenManus path does not exist: {path}")
    # Check QuantumVision
    if config["integrations"]["enable_quantum_vision"]:
        path = config["paths"]["quantum_vision"]
        if os.path.exists(path):
            print_success(f"QuantumVision path exists: {path}")
            # Try to import a key module
            try:
                sys.path.append(path)
                import nlp_processor
                print_success("QuantumVision modules can be imported")
            except ImportError as e:
                print_error(f"Cannot import QuantumVision modules: {e}")
        else:
            print_error(f"QuantumVision path does not exist: {path}")
    # Check Casibase
    if config["integrations"]["enable_casibase"]:
        path = config["paths"]["casibase"]
        if os.path.exists(path):
            print_success(f"Casibase path exists: {path}")
        else:
            print_error(f"Casibase path does not exist: {path}")
    # Check other components
    for component in ["cypher", "ai_companion"]:
        if config["integrations"][f"enable_{component}"]:
            path = config["paths"][component]
            if os.path.exists(path):
                print_success(f"{component} path exists: {path}")
            else:
                print_error(f"{component} path does not exist: {path}")

def check_connectivity():
    print_header("API Connectivity")
    host = "localhost"
    port = 8080
    # Check if something is already running on the port
    try:
        import socket
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(1)
        result = s.connect_ex((host, port))
        if result == 0:
            print_success(f"Port {port} is open (API may already be running)")
        else:
            print_warning(f"Port {port} is not in use (API is not running)")
        s.close()
    except Exception as e:
        print_error(f"Failed to check port: {e}")

def main():
    print_header("Atlas Intelligence Diagnostic Tool")
    print(f"Running diagnostics on: {os.path.abspath('.')}")
    # Check Python environment
    check_python()
    # Check core packages
    check_core_packages()
    # Load configuration
    config_path = Path("config/unified_config.yaml")
    if config_path.exists():
        try:
            with open(config_path, "r") as f:
                config = yaml.safe_load(f)
            print_success("Configuration loaded successfully")
            # Check components
            check_components(config)
        except Exception as e:
            print_error(f"Failed to load configuration: {e}")
    else:
        print_error(f"Configuration file not found: {config_path}")
    # Check connectivity
    check_connectivity()
    print_header("Diagnostic Complete")
    print("To start the API server, run: ./start_unified.sh")
    print("For detailed logs, check the logs directory")

if __name__ == "__main__":
    main()
EOF

chmod +x "$UNIFIED_DIR/diagnose.py"
# Create a repair script for common issues
log_info "Creating repair script..."
cat > "$UNIFIED_DIR/repair.sh" << EOF
#!/bin/bash
# Color codes for better output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[0;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

echo -e "${BLUE}==============================================${NC}"
echo -e "${BLUE}        Atlas Intelligence Repair Tool        ${NC}"
echo -e "${BLUE}==============================================${NC}"

# Check if running in the correct directory
if [ ! -f "./config/unified_config.yaml" ]; then
    echo -e "${RED}Error: Run this script from the AtlasUnified directory.${NC}"
    exit 1
fi

# Repair virtual environment
echo -e "${BLUE}Repairing virtual environment...${NC}"
rm -rf ./venv
python3 -m venv venv
source ./venv/bin/activate

# Install core dependencies
echo -e "${BLUE}Installing core dependencies...${NC}"
pip install --upgrade pip wheel setuptools
pip install fastapi uvicorn pydantic pyyaml numpy
pip install openai requests flask sqlalchemy

# Fix permissions
echo -e "${BLUE}Fixing permissions...${NC}"
chmod +x ./unified_bridge.py
chmod +x ./start_unified.sh
chmod +x ./diagnose.py

# Create any missing directories
echo -e "${BLUE}Creating missing directories...${NC}"
mkdir -p ./logs
mkdir -p ./data

# Run diagnostics
echo -e "${BLUE}Running diagnostics...${NC}"
python ./diagnose.py

echo -e "${GREEN}Repair complete. You can now run ./start_unified.sh${NC}"
EOF

chmod +x "$UNIFIED_DIR/repair.sh"
# Make sure we're back in the original directory
cd "$QUANTUM_VISION_DIR"

log_success "Installation complete!"
log_info "=============================================="
log_info "To start the unified system, run:"
log_success "cd $UNIFIED_DIR && ./start_unified.sh"
log_info "=============================================="
log_info "For diagnostics and troubleshooting:"
log_success "cd $UNIFIED_DIR && python diagnose.py"
log_info "=============================================="
log_info "To repair the installation if needed:"
log_success "cd $UNIFIED_DIR && ./repair.sh"
log_info "=============================================="
log_info "The unified API will be available at:"
log_success "http://localhost:8080"
log_info "=============================================="
log_info "Installation log saved to: $LOG_FILE"
log_info "=============================================="