diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
deleted file mode 100644
index 6398cc7..0000000
--- a/.github/workflows/codeql.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-name: "CodeQL Security Analysis"
-
-on:
-  push:
-    branches: [ main ]
-  pull_request:
-    branches: [ main ]
-  schedule:
-    # Run every Monday at 6:00 AM UTC
-    - cron: '0 6 * * 1'
-
-jobs:
-  analyze:
-    name: Analyze
-    runs-on: ubuntu-latest
-    permissions:
-      actions: read
-      contents: read
-      security-events: write
-
-    strategy:
-      fail-fast: false
-      matrix:
-        language: [ 'python' ]
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Initialize CodeQL
-        uses: github/codeql-action/init@v4
-        with:
-          languages: ${{ matrix.language }}
-          # Use default queries plus security-extended
-          queries: security-extended
-
-      - name: Autobuild
-        uses: github/codeql-action/autobuild@v4
-
-      - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v4
-        with:
-          category: "/language:${{ matrix.language }}"
diff --git a/.gitignore b/.gitignore
index 15b0ec7..fb85ab2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -42,7 +42,6 @@ MANIFEST
 
 # ==============================
 # PyInstaller
 # ==============================
-# Usually contains temporary files from pyinstaller builds
 *.manifest
 *.spec
diff --git a/cortex/cli.py b/cortex/cli.py
index 17004c6..b7171f1 100644
--- a/cortex/cli.py
+++ b/cortex/cli.py
@@ -12,6 +12,11 @@
 sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
 
+# Add src path for intent detection modules
+src_path = os.path.join(os.path.dirname(__file__), '..', 'src')
+if os.path.exists(src_path):
+    sys.path.insert(0, src_path)
+
 from LLM.interpreter import CommandInterpreter
 from cortex.coordinator import InstallationCoordinator, StepStatus
 from cortex.installation_history import (
@@ -198,6 +203,64 @@ def install(self, software: str, execute: bool = False, dry_run: bool = False):
 
         try:
             self._print_status("🧠", "Understanding request...")
+
+            # Try to use intent detection if available (for showing installation plan)
+            use_intent_detection = False
+            try:
+                from intent.detector import IntentDetector
+                from intent.planner import InstallationPlanner
+                from intent.clarifier import Clarifier
+                use_intent_detection = True
+            except ImportError:
+                self._debug("Intent detection modules not available, using fallback")
+
+            # Show intent-based plan and get confirmation (for any package)
+            if use_intent_detection and not execute and not dry_run:
+                detector = IntentDetector()
+                planner = InstallationPlanner()
+                clarifier = Clarifier()
+
+                intents = detector.detect(software)
+
+                # Check for clarification needs
+                clarification = clarifier.needs_clarification(intents, software)
+                if clarification:
+                    cx_print(f"\n❓ {clarification}", "warning")
+                    print()
+                    cx_print("Please provide more specific details in your request.", "info")
+                    cx_print("Example: 'cortex install pytorch and tensorflow'", "info")
+                    print()
+                    cx_print("Or press Ctrl+C to cancel.", "info")
+                    try:
+                        response = input("\nYour clarification: ").strip()
+                        if response:
+                            # Retry with the clarified request
+                            software = response
+                            intents = detector.detect(software)
+                            clarification = clarifier.needs_clarification(intents, software)
+                            if clarification:
+                                cx_print(f"\n❓ Still need clarification: {clarification}", "warning")
+                                cx_print("Falling back to LLM for command generation...", "info")
+                        else:
+                            cx_print("No input provided. Falling back to LLM...", "info")
+                    except (KeyboardInterrupt, EOFError):
+                        print("\n")
+                        cx_print("Operation cancelled by user.", "info")
+                        return 0
+
+                # Build plan (even if no intents detected, we'll show LLM-generated commands)
+                plan = planner.build_plan(intents)
+
+                # Always show plan if we have intents, otherwise fall through to LLM
+                if plan and len(plan) > 1:  # More than just the verification step
+                    cx_print("\n📋 Installation Plan:", "info")
+                    for i, step in enumerate(plan, 1):
+                        print(f"  {i}. {step}")
+                    print()
+
+            # For ANY request (not just ML), generate commands via LLM and ask confirmation
+            # This happens whether intents were detected or not
+
             interpreter = CommandInterpreter(api_key=api_key, provider=provider)
@@ -215,7 +278,32 @@ def install(self, software: str, execute: bool = False, dry_run: bool = False):
 
             # Extract packages from commands for tracking
             packages = history._extract_packages_from_commands(commands)
-
+
+            # Show generated commands and ask for confirmation (Issue #53)
+            if not execute and not dry_run:
+                print("\nGenerated commands:")
+                for i, cmd in enumerate(commands, 1):
+                    print(f"  {i}. {cmd}")
+
+                # Ask for confirmation before executing
+                print()
+                try:
+                    response = input("Proceed with plan? [Y/n]: ").strip().lower()
+                    if response == 'n' or response == 'no':
+                        cx_print("Installation cancelled by user.", "info")
+                        return 0
+                    elif response == '' or response == 'y' or response == 'yes':
+                        # User confirmed, proceed with execution
+                        execute = True
+                        cx_print("\nProceeding with installation...", "success")
+                    else:
+                        cx_print("Invalid response. Installation cancelled.", "error")
+                        return 1
+                except (KeyboardInterrupt, EOFError):
+                    print("\n")
+                    cx_print("Installation cancelled by user.", "info")
+                    return 0
+
             # Record installation start
             if execute or dry_run:
                 install_id = history.record_installation(
@@ -224,6 +312,13 @@ def install(self, software: str, execute: bool = False, dry_run: bool = False):
                     commands,
                     start_time
                 )
+
+            if not dry_run:
+                self._print_status("⚙️", f"Installing {software}...")
+                print("\nGenerated commands:")
+                for i, cmd in enumerate(commands, 1):
+                    print(f"  {i}. {cmd}")
+
             self._print_status("⚙️", f"Installing {software}...")
             print("\nGenerated commands:")
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 5061e23..e08252a 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,6 +1,21 @@
 # Development Dependencies
 pytest>=7.0.0
 pytest-cov>=4.0.0
+pytest-mock>=3.10.0
+
+# Code Quality
+black>=23.0.0
+pylint>=2.17.0
+mypy>=1.0.0
+
+# Security
+bandit>=1.7.0
+safety>=2.3.0
+
+# Documentation
+sphinx>=6.0.0
+sphinx-rtd-theme>=1.0.0
+PyYAML==6.0.3
 black>=24.0.0
 ruff>=0.8.0
 isort>=5.13.0
diff --git a/requirements.txt b/requirements.txt
index 4077f05..27d29f0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,3 +12,4 @@ pyyaml>=6.0.0
 
 # Type hints for older Python versions
 typing-extensions>=4.0.0
+PyYAML==6.0.3
diff --git a/cortex-cleanup.sh b/scripts/cortex-cleanup.sh
old mode 100755
new mode 100644
similarity index 100%
rename from cortex-cleanup.sh
rename to scripts/cortex-cleanup.sh
diff --git a/src/intent/__init__.py b/src/intent/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/intent/clarifier.py b/src/intent/clarifier.py
new file mode 100644
index 0000000..9d987cc
--- /dev/null
+++ b/src/intent/clarifier.py
@@ -0,0 +1,36 @@
+# clarifier.py
+
+from typing import List, Optional
+from intent.detector import Intent
+
+class Clarifier:
+    """
+    Checks if the detected intents have missing information.
+    Returns a clarifying question if needed.
+    """
+
+    def needs_clarification(self, intents: List[Intent], text: str) -> Optional[str]:
+        text = text.lower()
+
+        # 1. If user mentions "gpu" but has not specified which GPU → ask
+        if "gpu" in text and not any(i.target in ["cuda", "pytorch", "tensorflow"] for i in intents):
+            return "Do you have an NVIDIA GPU? (Needed for CUDA/PyTorch/TensorFlow installation)"
+
+        # 2. If user says "machine learning tools" but nothing specific
+        generic_terms = ["ml", "machine learning", "deep learning", "ai tools"]
+        if any(term in text for term in generic_terms) and len(intents) == 0:
+            return "Which ML frameworks do you need? (PyTorch, TensorFlow, JupyterLab...)"
+
+        # 3. If user asks to install CUDA but no GPU exists in context
+        if any(i.target == "cuda" for i in intents) and "gpu" not in text:
+            return "Installing CUDA requires an NVIDIA GPU. Do you have one?"
+
+        # 4. If package versions are missing (later we can add real version logic)
+        # Only ask about GPU/CPU version if user hasn't already specified
+        if "torch" in text and "version" not in text:
+            # Don't ask if user already mentioned GPU or CUDA
+            if not any(term in text for term in ["gpu", "cuda", "nvidia", "graphics"]):
+                return "Do you need the GPU version or CPU version of PyTorch?"
+
+        # 5. Otherwise no clarification needed
+        return None
diff --git a/src/intent/context.py b/src/intent/context.py
new file mode 100644
index 0000000..c2e6dbe
--- /dev/null
+++ b/src/intent/context.py
@@ -0,0 +1,69 @@
+# context.py
+
+from typing import List, Optional
+from intent.detector import Intent
+
+class SessionContext:
+    """
+    Stores context from previous user interactions.
+    This is needed for Issue #53:
+    'Uses context from previous commands'
+    """
+
+    def __init__(self):
+        self.detected_gpu: Optional[str] = None
+        self.previous_intents: List[Intent] = []
+        self.installed_packages: List[str] = []
+        self.clarifications: List[str] = []
+
+    # -------------------
+    # GPU CONTEXT
+    # -------------------
+
+    def set_gpu(self, gpu_name: str):
+        self.detected_gpu = gpu_name
+
+    def get_gpu(self) -> Optional[str]:
+        return self.detected_gpu
+
+    # -------------------
+    # INTENT CONTEXT
+    # -------------------
+
+    def add_intents(self, intents: List[Intent]):
+        self.previous_intents.extend(intents)
+
+    def get_previous_intents(self) -> List[Intent]:
+        return self.previous_intents
+
+    # -------------------
+    # INSTALLED PACKAGES
+    # -------------------
+
+    def add_installed(self, pkg: str):
+        if pkg not in self.installed_packages:
+            self.installed_packages.append(pkg)
+
+    def is_installed(self, pkg: str) -> bool:
+        return pkg in self.installed_packages
+
+    # -------------------
+    # CLARIFICATIONS
+    # -------------------
+
+    def add_clarification(self, question: str):
+        self.clarifications.append(question)
+
+    def get_clarifications(self) -> List[str]:
+        return self.clarifications
+
+    # -------------------
+    # RESET CONTEXT
+    # -------------------
+
+    def reset(self):
+        """Reset context (new session)"""
+        self.detected_gpu = None
+        self.previous_intents = []
+        self.installed_packages = []
+        self.clarifications = []
diff --git a/src/intent/detector.py b/src/intent/detector.py
new file mode 100644
index 0000000..5093649
--- /dev/null
+++ b/src/intent/detector.py
@@ -0,0 +1,53 @@
+# detector.py
+
+from dataclasses import dataclass
+from typing import List, Optional, ClassVar
+
+@dataclass
+class Intent:
+    action: str
+    target: str
+    details: Optional[dict] = None
+
+class IntentDetector:
+    """
+    Extracts high-level installation intents from natural language requests.
+    """
+
+    COMMON_PACKAGES: ClassVar[dict[str, List[str]]] = {
+        "cuda": ["cuda", "nvidia toolkit"],
+        "pytorch": ["pytorch", "torch"],
+        "tensorflow": ["tensorflow", "tf"],
+        "jupyter": ["jupyter", "jupyterlab", "notebook"],
+        "cudnn": ["cudnn"],
+        "python": ["python", "python3"],
+        "docker": ["docker"],
+        "nodejs": ["node", "nodejs", "npm"],
+        "git": ["git"],
+        "gpu": ["gpu", "graphics card", "rtx", "nvidia"]
+    }
+
+    def detect(self, text: str) -> List[Intent]:
+        text = text.lower()
+        intents = []
+
+        # 1. Rule-based keyword detection (skip GPU to avoid duplicate install intent)
+        for pkg, keywords in self.COMMON_PACKAGES.items():
+            if pkg == "gpu":
+                continue  # GPU handled separately below
+            if any(k in text for k in keywords):
+                intents.append(Intent(action="install", target=pkg))
+
+        # 2. Look for verify steps
+        if "verify" in text or "check" in text:
+            intents.append(Intent(action="verify", target="installation"))
+
+        # 3. GPU configure intent (use all GPU synonyms)
+        gpu_keywords = self.COMMON_PACKAGES.get("gpu", ["gpu"])
+        if any(k in text for k in gpu_keywords) and not any(
+            i.action == "configure" and i.target == "gpu"
+            for i in intents
+        ):
+            intents.append(Intent(action="configure", target="gpu"))
+
+        return intents
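> Reviewer note: a quick illustration — not part of the patch — of what the rule-based detector returns for a typical request, assuming `src/` is on `sys.path`. The reprs shown are the default dataclass reprs of `Intent`, and the ordering follows `COMMON_PACKAGES` insertion order.

```python
# Hypothetical illustration -- not part of the patch.
from intent.detector import IntentDetector

for intent in IntentDetector().detect("Install CUDA and PyTorch, then verify GPU"):
    print(intent)
# Intent(action='install', target='cuda', details=None)
# Intent(action='install', target='pytorch', details=None)
# Intent(action='verify', target='installation', details=None)
# Intent(action='configure', target='gpu', details=None)
```

Note the substring matching: "pytorch" matches via the "torch" keyword, and "verify" triggers the separate verify intent before the GPU-configure rule runs.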
+Format: "install: package" or "configure: component" +""" + + # add explicit timeout to avoid long hangs + response = self.llm.with_options(timeout=30.0).messages.create( + model=self.model, + max_tokens=300, + messages=[{"role": "user", "content": prompt}] + ) + + # ---- Safety check ---- + if not getattr(response, "content", None) or not hasattr(response.content[0], "text"): + return intents + + llm_output = response.content[0].text.lower().split("\n") + + new_intents = intents[:] + + for line in llm_output: + if "install:" in line: + pkg = line.replace("install:", "").strip() + if pkg: + new_intents.append(Intent("install", pkg)) + elif "configure:" in line: + target = line.replace("configure:", "").strip() + if target: + new_intents.append(Intent("configure", target)) + elif "verify:" in line: + target = line.replace("verify:", "").strip() + if target: + new_intents.append(Intent("verify", target)) + + return new_intents + + # ---------------------------------------------- + # LLM optimization suggestions + # ---------------------------------------------- + def suggest_optimizations(self, text: str) -> list[str]: + + prompt = f""" +User request: "{text}" + +Suggest optional tools to improve ML installation. +Examples: Conda, VSCode extensions, CUDA toolkit managers, Docker, Anaconda. +Return bullet list only. +""" + + response = self.llm.with_options(timeout=30.0).messages.create( + model=self.model, + max_tokens=150, + messages=[{"role": "user", "content": prompt}] + ) + + # ---- Safety check ---- + if not getattr(response, "content", None) or not hasattr(response.content[0], "text"): + return [] + + return [line.strip() for line in response.content[0].text.strip().split("\n") if line.strip()] diff --git a/src/intent/planner.py b/src/intent/planner.py new file mode 100644 index 0000000..453fc35 --- /dev/null +++ b/src/intent/planner.py @@ -0,0 +1,63 @@ +# planner.py + +from typing import List +from intent.detector import Intent + +class InstallationPlanner: + + GPU_PACKAGES = ["cuda", "cudnn", "pytorch", "tensorflow"] + + def build_plan(self, intents: List[Intent]) -> List[str]: + plan = [] + installed = set() + + # 1. If GPU-related intents exist → add GPU detection + has_gpu = any(i.target in self.GPU_PACKAGES or i.target == "gpu" for i in intents) + if has_gpu: + plan.append("Detect GPU: Run `nvidia-smi` or PCI scan") + + # 2. Add installation steps based on intent order + for intent in intents: + if intent.action == "install" and intent.target not in installed: + + if intent.target == "cuda": + plan.append("Install CUDA 12.3 + drivers") + + elif intent.target == "cudnn": + plan.append("Install cuDNN (matching CUDA version)") + + elif intent.target == "pytorch": + plan.append("Install PyTorch (GPU support)") + + elif intent.target == "tensorflow": + plan.append("Install TensorFlow (GPU support)") + + elif intent.target == "jupyter": + plan.append("Install JupyterLab") + + elif intent.target == "python": + plan.append("Install Python 3") + + elif intent.target == "docker": + plan.append("Install Docker") + + elif intent.target == "nodejs": + plan.append("Install Node.js and npm") + + elif intent.target == "git": + plan.append("Install Git") + + elif intent.target == "gpu": + # GPU setup is handled by CUDA/cuDNN + pass + + installed.add(intent.target) + + # 3. Add GPU configuration if needed + if has_gpu: + plan.append("Configure GPU acceleration environment") + + # 4. 
Add verification step + plan.append("Verify installation and GPU acceleration") + + return plan diff --git a/src/test_clarifier.py b/src/test_clarifier.py new file mode 100644 index 0000000..a16d3f8 --- /dev/null +++ b/src/test_clarifier.py @@ -0,0 +1,12 @@ +from intent.detector import IntentDetector +from intent.clarifier import Clarifier + +def test_clarifier_gpu_missing(): + d = IntentDetector() + c = Clarifier() + + text = "I want to run ML models" + intents = d.detect(text) + + question = c.needs_clarification(intents, text) + assert question is not None diff --git a/src/test_context.py b/src/test_context.py new file mode 100644 index 0000000..c4e521f --- /dev/null +++ b/src/test_context.py @@ -0,0 +1,13 @@ +from intent.context import SessionContext +from intent.detector import Intent + +def test_context_storage(): + ctx = SessionContext() + ctx.set_gpu("NVIDIA RTX 4090") + + ctx.add_intents([Intent("install", "cuda")]) + ctx.add_installed("cuda") + + assert ctx.get_gpu() == "NVIDIA RTX 4090" + assert ctx.is_installed("cuda") is True + assert len(ctx.get_previous_intents()) == 1 diff --git a/src/test_intent_detection.py b/src/test_intent_detection.py new file mode 100644 index 0000000..5dbecaa --- /dev/null +++ b/src/test_intent_detection.py @@ -0,0 +1,15 @@ +from intent.detector import IntentDetector, Intent + +def test_detector_basic(): + d = IntentDetector() + intents = d.detect("Install CUDA and PyTorch for GPU") + + targets = {i.target for i in intents} + assert "cuda" in targets + assert "pytorch" in targets + assert "gpu" in targets + +def test_detector_empty(): + d = IntentDetector() + intents = d.detect("Hello world, nothing here") + assert intents == [] diff --git a/src/test_llm_agent.py b/src/test_llm_agent.py new file mode 100644 index 0000000..f1be07a --- /dev/null +++ b/src/test_llm_agent.py @@ -0,0 +1,28 @@ +from intent.llm_agent import LLMIntentAgent + +class MockMessages: + def create(self, **kwargs): + class Response: + class Content: + text = "install: tensorflow\ninstall: jupyter" + content = [Content()] + return Response() + +class MockLLM: + def __init__(self): + self.messages = MockMessages() + +def test_llm_agent_mocked(): + agent = LLMIntentAgent(api_key="fake-key") + + # Replace real LLM with mock + agent.llm = MockLLM() + + # Disable clarification during testing + agent.clarifier.needs_clarification = lambda *a, **k: None + + result = agent.process("Install ML tools on GPU") + + assert "plan" in result + assert len(result["plan"]) > 0 + assert "suggestions" in result diff --git a/src/test_planner.py b/src/test_planner.py new file mode 100644 index 0000000..fde7b15 --- /dev/null +++ b/src/test_planner.py @@ -0,0 +1,16 @@ +from intent.detector import Intent +from intent.planner import InstallationPlanner + +def test_planner_cuda_pipeline(): + planner = InstallationPlanner() + intents = [ + Intent("install", "cuda"), + Intent("install", "pytorch"), + Intent("configure", "gpu") + ] + plan = planner.build_plan(intents) + + assert "Install CUDA 12.3 + drivers" in plan + assert "Install PyTorch (GPU support)" in plan + assert "Configure GPU acceleration environment" in plan + assert plan[-1] == "Verify installation and GPU acceleration" diff --git a/test/test_cli.py b/test/test_cli.py index 635ad06..547d797 100644 --- a/test/test_cli.py +++ b/test/test_cli.py @@ -13,21 +13,25 @@ class TestCortexCLI(unittest.TestCase): def setUp(self): self.cli = CortexCLI() - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) - def test_get_api_key_openai(self): + 
@patch.dict(os.environ, {'OPENAI_API_KEY': 'sk-test-key'}) + @patch('cortex.cli.validate_api_key', return_value=(True, 'openai', None)) + def test_get_api_key_openai(self, mock_validate): api_key = self.cli._get_api_key() - self.assertEqual(api_key, 'test-key') + self.assertEqual(api_key, 'sk-test-key') - @patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'test-claude-key', 'OPENAI_API_KEY': ''}, clear=True) - def test_get_api_key_claude(self): + @patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'sk-ant-test-key', 'OPENAI_API_KEY': ''}, clear=True) + @patch('cortex.cli.validate_api_key', return_value=(True, 'claude', None)) + def test_get_api_key_claude(self, mock_validate): api_key = self.cli._get_api_key() - self.assertEqual(api_key, 'test-claude-key') + self.assertEqual(api_key, 'sk-ant-test-key') @patch.dict(os.environ, {}, clear=True) + @patch('cortex.cli.validate_api_key', return_value=(False, None, 'No API key found')) @patch('sys.stderr') - def test_get_api_key_not_found(self, mock_stderr): + def test_get_api_key_not_found(self, mock_stderr, mock_validate): api_key = self.cli._get_api_key() - self.assertIsNone(api_key) + # Now returns 'ollama-local' as fallback + self.assertEqual(api_key, 'ollama-local') @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) def test_get_provider_openai(self): @@ -59,9 +63,10 @@ def test_install_no_api_key(self): result = self.cli.install("docker") self.assertEqual(result, 1) - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) + @patch.dict(os.environ, {'OPENAI_API_KEY': 'sk-test-key'}) + @patch('cortex.cli.validate_api_key', return_value=(True, 'openai', None)) @patch('cortex.cli.CommandInterpreter') - def test_install_dry_run(self, mock_interpreter_class): + def test_install_dry_run(self, mock_interpreter_class, mock_validate): mock_interpreter = Mock() mock_interpreter.parse.return_value = ["apt update", "apt install docker"] mock_interpreter_class.return_value = mock_interpreter @@ -71,9 +76,11 @@ def test_install_dry_run(self, mock_interpreter_class): self.assertEqual(result, 0) mock_interpreter.parse.assert_called_once_with("install docker") - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) + @patch.dict(os.environ, {'OPENAI_API_KEY': 'sk-test-key'}) + @patch('cortex.cli.validate_api_key', return_value=(True, 'openai', None)) + @patch('builtins.input', return_value='n') @patch('cortex.cli.CommandInterpreter') - def test_install_no_execute(self, mock_interpreter_class): + def test_install_no_execute(self, mock_interpreter_class, mock_input, mock_validate): mock_interpreter = Mock() mock_interpreter.parse.return_value = ["apt update", "apt install docker"] mock_interpreter_class.return_value = mock_interpreter @@ -83,10 +90,11 @@ def test_install_no_execute(self, mock_interpreter_class): self.assertEqual(result, 0) mock_interpreter.parse.assert_called_once() - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) + @patch.dict(os.environ, {'OPENAI_API_KEY': 'sk-test-key'}) + @patch('cortex.cli.validate_api_key', return_value=(True, 'openai', None)) @patch('cortex.cli.CommandInterpreter') @patch('cortex.cli.InstallationCoordinator') - def test_install_with_execute_success(self, mock_coordinator_class, mock_interpreter_class): + def test_install_with_execute_success(self, mock_coordinator_class, mock_interpreter_class, mock_validate): mock_interpreter = Mock() mock_interpreter.parse.return_value = ["echo test"] mock_interpreter_class.return_value = mock_interpreter @@ -170,7 +178,8 @@ def test_install_unexpected_error(self, 
mock_interpreter_class): @patch('sys.argv', ['cortex']) def test_main_no_command(self): result = main() - self.assertEqual(result, 1) + # Now returns 0 and shows help instead of error + self.assertEqual(result, 0) @patch('sys.argv', ['cortex', 'install', 'docker']) @patch('cortex.cli.CortexCLI.install')
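> Reviewer note: an end-to-end sketch of the offline fallback path — not part of the patch. With no `anthropic` SDK or API key, `LLMIntentAgent.process()` never calls the LLM and returns the rule-based detector/planner output (step 3 in `process()`); the expected plan below follows directly from the planner's branch table.

```python
# Hypothetical end-to-end sketch -- not part of the patch.
# Assumes src/ is on sys.path; api_key=None forces self.llm = None.
from intent.llm_agent import LLMIntentAgent

agent = LLMIntentAgent(api_key=None)
result = agent.process("install cuda, pytorch and jupyter for my gpu")

for step in result["plan"]:
    print(step)
# Detect GPU: Run `nvidia-smi` or PCI scan
# Install CUDA 12.3 + drivers
# Install PyTorch (GPU support)
# Install JupyterLab
# Configure GPU acceleration environment
# Verify installation and GPU acceleration
```

This request passes every clarifier rule (the cuda/pytorch intents satisfy rule 1, and "gpu" in the text satisfies rules 3 and 4), so no clarifying question interrupts the plan.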