diff --git a/.github/workflows/ci-with-pipegenie.yml b/.github/workflows/ci-with-pipegenie.yml
new file mode 100644
index 000000000..edec316f2
--- /dev/null
+++ b/.github/workflows/ci-with-pipegenie.yml
@@ -0,0 +1,67 @@
+name: CI Pipeline with PipeGenie Integration
+
+on:
+  push:
+    branches: [ main, develop, 'feature/**' ]
+  pull_request:
+    branches: [ main ]
+
+jobs:
+  test-and-build:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+          cache: 'pip'
+
+      - name: Install dependencies
+        run: |
+          pip install --upgrade pip
+          pip install -r requirements.txt
+
+      - name: Run tests
+        run: |
+          pytest tests/ --tb=short -v --junitxml=test-results.xml
+
+      - name: Build
+        run: |
+          echo "Build step here..."
+
+  # This job notifies PipeGenie on failure (via webhook)
+  notify-pipegenie:
+    runs-on: ubuntu-latest
+    needs: test-and-build
+    if: failure()  # Only run if the CI job fails
+
+    steps:
+      # Fix: the step header below was commented out while its curl body was
+      # not, leaving a bare scalar under `steps:` — invalid YAML that broke
+      # the whole workflow. Restored the step name and `run:` block.
+      - name: Notify PipeGenie of failure
+        run: |
+          # NOTE(review): X-Hub-Signature-256 should carry an HMAC-SHA256 of the request body, not the raw secret; the interpolated head_commit.message is also unescaped (JSON/shell injection risk) — harden before production.
+          curl -X POST "${{ secrets.PIPEGENIE_URL }}/api/webhook/github" \
+            -H "Content-Type: application/json" \
+            -H "X-GitHub-Event: workflow_run" \
+            -H "X-Hub-Signature-256: sha256=${{ secrets.PIPEGENIE_WEBHOOK_SECRET }}" \
+            -d '{
+              "action": "completed",
+              "workflow_run": {
+                "id": "${{ github.run_id }}",
+                "name": "${{ github.workflow }}",
+                "head_branch": "${{ github.ref_name }}",
+                "head_sha": "${{ github.sha }}",
+                "conclusion": "failure",
+                "html_url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}",
+                "head_commit": {
+                  "message": "${{ github.event.head_commit.message }}"
+                },
+                "actor": { "login": "${{ github.actor }}" }
+              },
+              "repository": {
+                "full_name": "${{ github.repository }}",
+                "name": "${{ github.event.repository.name }}"
+              }
+            }'
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000..a1c68dc31
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,184 @@
+# OS files
+.DS_Store
+Thumbs.db + +# Editor and IDE +.vscode/ +.idea/ +*.swp +*.swo + +# Environment and secrets +.env +.env.* +!.env.example + +# Logs +*.log +logs/ + +# Python +__pycache__/ +*.py[cod] +*$py.class +.pytest_cache/ +.mypy_cache/ +.ruff_cache/ +.coverage +.coverage.* +htmlcov/ +.venv/ +venv/ +env/ + +# Backend artifacts +backend/.pytest_cache/ +backend/.mypy_cache/ +backend/.ruff_cache/ + +# Node / Vite (frontend) +node_modules/ +frontend/node_modules/ +frontend/dist/ +frontend/.vite/ +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* + +# Build and cache outputs +dist/ +build/ +.tmp/ +tmp/ +.cache/ + +# Docker and compose overrides +*.pid +*.seed +docker-compose.override.yml + +# SigNoz-specific ignore rules +/signoz/deploy/docker/environment_tiny/common_test +/signoz/frontend/.pnp +/signoz/frontend/i18n-translations-hash.json +/signoz/**/*.pnp.js +/signoz/frontend/coverage +/signoz/frontend/build +/signoz/frontend/.yarnclean +/signoz/frontend/.temp_cache +/signoz/frontend/test-results +/signoz/frontend/src/constants/env.ts + +/signoz/**/build +/signoz/**/storage +/signoz/**/locust-scripts/__pycache__/ +/signoz/**/__debug_bin + +/signoz/pkg/query-service/signoz.db +/signoz/pkg/query-service/tests/test-deploy/data/ +/signoz/ee/query-service/signoz.db +/signoz/ee/query-service/tests/test-deploy/data/ + +# local data (SigNoz) +/signoz/**/*.backup +/signoz/**/*.db +/signoz/**/*.db-shm +/signoz/**/*.db-wal +/signoz/**/db +/signoz/deploy/docker/clickhouse-setup/data/ +/signoz/deploy/docker-swarm/clickhouse-setup/data/ +/signoz/bin/ +/signoz/.local/ +/signoz/*/query-service/queries.active +/signoz/ee/query-service/db + +# e2e (SigNoz) +/signoz/e2e/node_modules/ +/signoz/e2e/test-results/ +/signoz/e2e/blob-report/ +/signoz/e2e/.auth + +# go (SigNoz) +/signoz/vendor/ +/signoz/**/main/** +/signoz/**/__debug_bin** + +# git-town (SigNoz) +/signoz/.git-branches.toml + +# ignore user_scripts fetched by init-clickhouse (SigNoz) 
+/signoz/deploy/common/clickhouse/user_scripts/ + +/signoz/queries.active + +# tmp (SigNoz) +/signoz/**/tmp/** + +# .devenv tmp files (SigNoz) +/signoz/.devenv/**/tmp/** +/signoz/.qodo +/signoz/.dev + +# Additional editor/tooling rules from SigNoz +/signoz/.zed + +# Additional Python ecosystem rules from SigNoz +/signoz/**/*.so +/signoz/.Python +/signoz/develop-eggs/ +/signoz/downloads/ +/signoz/eggs/ +/signoz/.eggs/ +/signoz/lib64/ +/signoz/parts/ +/signoz/sdist/ +/signoz/var/ +/signoz/wheels/ +/signoz/share/python-wheels/ +/signoz/**/*.egg-info/ +/signoz/.installed.cfg +/signoz/**/*.egg +/signoz/**/MANIFEST +/signoz/**/*.manifest +/signoz/**/*.spec +/signoz/**/pip-log.txt +/signoz/**/pip-delete-this-directory.txt +/signoz/.tox/ +/signoz/.nox/ +/signoz/**/nosetests.xml +/signoz/**/coverage.xml +/signoz/**/*.cover +/signoz/**/*.py,cover +/signoz/.hypothesis/ +/signoz/**/cover/ +/signoz/**/*.mo +/signoz/**/*.pot +/signoz/**/local_settings.py +/signoz/**/db.sqlite3 +/signoz/**/db.sqlite3-journal +/signoz/**/instance/ +/signoz/**/.webassets-cache +/signoz/**/.scrapy +/signoz/docs/_build/ +/signoz/.pybuilder/ +/signoz/target/ +/signoz/.ipynb_checkpoints +/signoz/**/profile_default/ +/signoz/**/ipython_config.py +/signoz/**/celerybeat-schedule +/signoz/**/celerybeat.pid +/signoz/**/*.sage.py +/signoz/ENV/ +/signoz/env.bak/ +/signoz/venv.bak/ +/signoz/.spyderproject +/signoz/.spyproject +/signoz/.ropeproject +/signoz/site +/signoz/.dmypy.json +/signoz/**/dmypy.json +/signoz/.pyre/ +/signoz/.pytype/ +/signoz/**/cython_debug/ +/signoz/**/pyrightconfig.json diff --git a/README.md b/README.md index c5c886b3e..b3d6e66ec 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# HackToFuture 4.0 — Template +# The_Quad-D03 Welcome to your official HackToFuture 4 repository. 
diff --git a/backend/.env.example b/backend/.env.example new file mode 100644 index 000000000..a0037faf4 --- /dev/null +++ b/backend/.env.example @@ -0,0 +1,58 @@ +# PipeGenie Environment Configuration +# Copy this to .env and fill in your values + +# MongoDB +MONGODB_URL=mongodb://localhost:27017 +MONGODB_DB=pipegenie + +# Redis (optional - caching) +REDIS_URL=redis://localhost:6379 + +# GitHub +GITHUB_TOKEN=your_github_personal_access_token_here +GITHUB_WEBHOOK_SECRET=pipegenie-webhook-secret +REPO_WRITEBACK_ENABLED=true +AUTO_OPEN_PR=true +PIPEGENIE_BOT_NAME=PipeGenie Bot +PIPEGENIE_BOT_EMAIL=pipegenie-bot@users.noreply.github.com + +# AI Model Selection +# Option 1: Use Gemini API (default) +LLM_PROVIDER=gemini +GEMINI_API_KEY=your_gemini_api_key_here +GEMINI_MODEL=gemini-2.5-flash + +# Option 2: Use local Ollama (manual fallback) +# LLM_PROVIDER=ollama +# OLLAMA_BASE_URL=http://localhost:11434 +# LLM_MODEL=mistral + +# Option 3: Use Mistral API (manual fallback) +# LLM_PROVIDER=mistral +# MISTRAL_API_KEY=your_mistral_api_key_here +# MISTRAL_MODEL=mistral-large-latest + +# Legacy compatibility switch (optional) +USE_OLLAMA=false + +# MilvusDB +MILVUS_HOST=localhost +MILVUS_PORT=19530 + +# Risk Thresholds (0.0-1.0) +RISK_LOW_THRESHOLD=0.3 +RISK_HIGH_THRESHOLD=0.7 + +# Frontend URL (for CORS) +FRONTEND_URL=http://localhost:5173 + +# App +SECRET_KEY=change-this-to-a-long-random-string-in-production +DEBUG=true + +# Observability (SigNoz via OpenTelemetry) +OTEL_ENABLED=true +OTEL_SERVICE_NAME=pipegenie-backend +OTEL_EXPORTER_OTLP_ENDPOINT=http://127.0.0.1:4318/v1/traces +OTEL_EXPORTER_OTLP_INSECURE=true +OTEL_RESOURCE_ATTRIBUTES=service.namespace=pipegenie,deployment.environment=dev diff --git a/backend/Dockerfile b/backend/Dockerfile new file mode 100644 index 000000000..181646a57 --- /dev/null +++ b/backend/Dockerfile @@ -0,0 +1,18 @@ +FROM python:3.11-slim + +WORKDIR /app + +# Install system deps for milvus client, docker SDK +RUN apt-get update && apt-get 
install -y \ + build-essential \ + curl \ + && rm -rf /var/lib/apt/lists/* + +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +COPY . . + +EXPOSE 8000 + +CMD ["uvicorn", "backend.main:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/backend/Dockerfile.tests b/backend/Dockerfile.tests new file mode 100644 index 000000000..d30942d19 --- /dev/null +++ b/backend/Dockerfile.tests @@ -0,0 +1,18 @@ +FROM python:3.11-slim + +WORKDIR /workspace + +RUN apt-get update && apt-get install -y \ + build-essential \ + curl \ + && rm -rf /var/lib/apt/lists/* + +COPY requirements.txt /workspace/backend/requirements.txt +RUN pip install --no-cache-dir -r /workspace/backend/requirements.txt + +COPY . /workspace/backend + +ENV PYTHONPATH=/workspace +ENV MONGODB_URL=mongodb://localhost:27017 + +CMD ["python", "-m", "pytest", "-q", "backend/tests/test_risk_evaluator.py"] diff --git a/backend/__init__.py b/backend/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/backend/agents/__init__.py b/backend/agents/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/backend/agents/diagnosis_agent.py b/backend/agents/diagnosis_agent.py new file mode 100644 index 000000000..3b6b27292 --- /dev/null +++ b/backend/agents/diagnosis_agent.py @@ -0,0 +1,313 @@ +""" +Diagnosis Agent – analyzes pipeline logs and identifies root causes of failures. 
+""" +import re +import json +import logging +import time +from google import genai + +from langchain_core.messages import HumanMessage, SystemMessage +from langchain_community.chat_models import ChatOllama +from langchain_mistralai import ChatMistralAI + +from backend.config import settings +from backend.models.pipeline_event import FailureCategory +from backend.agents.vector_store import VectorStore + +logger = logging.getLogger(__name__) + + +DIAGNOSIS_SYSTEM_PROMPT = """You are PipeGenie's Diagnosis Agent — an expert DevOps AI that analyzes +CI/CD pipeline failure logs to identify root causes. + +Your job: +1. Read the pipeline failure logs carefully +2. Identify the exact root cause +3. Classify the failure category +4. Check if this is a recurring pattern + +Output ONLY valid JSON with this exact structure: +{ + "root_cause": "clear one-sentence root cause", + "failure_category": "dependency_error|test_failure|build_error|config_error|network_error|permissions_error|unknown", + "affected_files": ["file1", "file2"], + "error_lines": ["exact error line from logs"], + "confidence": 0.95, + "summary": "2-3 sentence human-readable summary" +} + +Be precise. 
Focus on the actual error, not symptoms.""" + + +class DiagnosisAgent: + def __init__(self): + self.vector_store = VectorStore() + self._init_llm() + + def _resolve_provider(self) -> str: + provider = (settings.LLM_PROVIDER or "").strip().lower() + if provider: + return provider + if settings.USE_OLLAMA: + return "ollama" + if settings.MISTRAL_API_KEY: + return "mistral" + return "gemini" + + def _init_llm(self): + self.provider = self._resolve_provider() + self.last_provider_used = self.provider + self.llm = None + self.gemini_client = None + self.ollama_fallback_llm = None + + if self.provider == "gemini": + client_kwargs = {} + if settings.GEMINI_API_KEY: + client_kwargs["api_key"] = settings.GEMINI_API_KEY + self.gemini_client = genai.Client(**client_kwargs) + # Keep a local Ollama fallback so quota/rate limits don't force rule-based fallback. + self.ollama_fallback_llm = ChatOllama( + model=settings.LLM_MODEL, + base_url=settings.OLLAMA_BASE_URL, + temperature=0.1, + format="json" + ) + logger.info(f"[DiagnosisAgent] Using Gemini model: {settings.GEMINI_MODEL}") + elif self.provider == "ollama": + self.llm = ChatOllama( + model=settings.LLM_MODEL, + base_url=settings.OLLAMA_BASE_URL, + temperature=0.1, + format="json" + ) + logger.info(f"[DiagnosisAgent] Using Ollama model: {settings.LLM_MODEL}") + elif self.provider == "mistral": + self.llm = ChatMistralAI( + model=settings.MISTRAL_MODEL, + api_key=settings.MISTRAL_API_KEY, + temperature=0.1 + ) + logger.info(f"[DiagnosisAgent] Using Mistral model: {settings.MISTRAL_MODEL}") + else: + raise ValueError(f"Unsupported LLM_PROVIDER '{self.provider}'. 
Use gemini, ollama, or mistral.") + + def get_provider_label(self) -> str: + active_provider = getattr(self, "last_provider_used", self.provider) + if active_provider == "gemini": + return f"gemini:{settings.GEMINI_MODEL}" + if active_provider == "mistral": + return f"mistral:{settings.MISTRAL_MODEL}" + return f"ollama:{settings.LLM_MODEL}" + + def _invoke_with_ollama_fallback(self, system_prompt: str, user_prompt: str, reason: str) -> str | None: + if not self.ollama_fallback_llm: + return None + + messages = [ + SystemMessage(content=system_prompt), + HumanMessage(content=user_prompt), + ] + try: + response = self.ollama_fallback_llm.invoke(messages) + self.last_provider_used = "ollama" + logger.warning( + f"[DiagnosisAgent] Switched to Ollama fallback due to Gemini issue: {reason}" + ) + return response.content if hasattr(response, "content") else str(response) + except Exception as fallback_error: + logger.warning( + f"[DiagnosisAgent] Ollama fallback also failed: {fallback_error}" + ) + return None + + def _invoke_with_prompts(self, system_prompt: str, user_prompt: str) -> str: + self.last_provider_used = self.provider + if self.provider == "gemini": + combined_prompt = ( + f"{system_prompt}\n\n" + f"{user_prompt}\n\n" + "Return ONLY valid JSON." 
+ ) + last_error = None + max_attempts = 3 + for attempt in range(1, max_attempts + 1): + try: + response = self.gemini_client.models.generate_content( + model=settings.GEMINI_MODEL, + contents=combined_prompt, + ) + text = getattr(response, "text", None) + self.last_provider_used = "gemini" + return text if text else str(response) + except Exception as e: + last_error = e + error_text = str(e).upper() + is_quota_limited = ( + "429" in error_text + or "RESOURCE_EXHAUSTED" in error_text + or "QUOTA" in error_text + ) + if is_quota_limited: + fallback_response = self._invoke_with_ollama_fallback( + system_prompt, + user_prompt, + str(e), + ) + if fallback_response: + return fallback_response + + is_retryable = ( + "503" in error_text + or "UNAVAILABLE" in error_text + or "429" in error_text + or "RESOURCE_EXHAUSTED" in error_text + or "TIMEOUT" in error_text + ) + if not is_retryable or attempt == max_attempts: + raise + + backoff_seconds = 0.7 * (2 ** (attempt - 1)) + logger.warning( + f"[DiagnosisAgent] Gemini transient error on attempt {attempt}/{max_attempts}: {e}. Retrying in {backoff_seconds:.1f}s" + ) + time.sleep(backoff_seconds) + + if last_error: + raise last_error + + messages = [ + SystemMessage(content=system_prompt), + HumanMessage(content=user_prompt), + ] + response = self.llm.invoke(messages) + self.last_provider_used = self.provider + return response.content if hasattr(response, "content") else str(response) + + def invoke_prompt(self, system_prompt: str, user_prompt: str) -> str: + return self._invoke_with_prompts(system_prompt, user_prompt) + + async def analyze(self, event_id: str, logs: str, repo: str, branch: str, + commit_message: str) -> dict: + """Main diagnosis entry point. Returns structured diagnosis dict.""" + logger.info(f"[DiagnosisAgent] Analyzing event {event_id}") + + # 1. Truncate logs intelligently (keep tail where errors usually are) + processed_logs = self._smart_truncate(logs) + + # 2. 
Check vector store for similar past failures + similar_cases = await self.vector_store.search_similar_failures( + processed_logs, top_k=3 + ) + similar_context = self._format_similar_cases(similar_cases) + + # 3. Build prompt + user_prompt = f""" +Repository: {repo} +Branch: {branch} +Commit: {commit_message} + +=== PIPELINE FAILURE LOGS === +{processed_logs} + +=== SIMILAR PAST FAILURES (for reference) === +{similar_context} + +Analyze the above logs and return a JSON diagnosis. +""" + # 4. Call LLM + try: + response_text = self._invoke_with_prompts( + DIAGNOSIS_SYSTEM_PROMPT, + user_prompt, + ) + result = self._parse_json_response(response_text) + if self.provider == "gemini" and self.last_provider_used == "ollama": + result["summary"] = ( + f"Gemini quota/rate limit hit; answered via local Ollama fallback: {result.get('summary', '')}".strip() + ) + except Exception as e: + logger.error(f"LLM diagnosis failed: {e}") + result = self._fallback_diagnosis(logs) + if self.provider == "gemini": + result["summary"] = ( + f"Gemini was temporarily unavailable; used rule-based fallback: {result['root_cause']}" + ) + + # 5. Store in vector DB for future recall + await self.vector_store.store_failure( + event_id=event_id, + logs_summary=processed_logs[:500], + diagnosis=result + ) + + logger.info(f"[DiagnosisAgent] Category: {result.get('failure_category')} | Confidence: {result.get('confidence')}") + return result + + def _smart_truncate(self, logs: str, max_chars: int = 4000) -> str: + """Keep the most relevant parts of logs (head + tail).""" + if len(logs) <= max_chars: + return logs + head = logs[:1000] + tail = logs[-(max_chars - 1000):] + return f"{head}\n...[truncated]...\n{tail}" + + def _format_similar_cases(self, cases: list) -> str: + if not cases: + return "No similar past failures found." 
+ lines = [] + for i, case in enumerate(cases, 1): + lines.append(f"Case {i}: {case.get('root_cause', 'N/A')} → Fixed by: {case.get('fix_type', 'N/A')}") + return "\n".join(lines) + + def _parse_json_response(self, content: str) -> dict: + """Extract JSON from LLM response even if wrapped in markdown.""" + if not isinstance(content, str): + content = str(content) + + # Try direct parse + try: + return json.loads(content) + except json.JSONDecodeError: + pass + # Try extracting from markdown code block + match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", content, re.DOTALL) + if match: + return json.loads(match.group(1)) + # Try finding any JSON object + match = re.search(r"\{.*\}", content, re.DOTALL) + if match: + return json.loads(match.group(0)) + raise ValueError(f"Could not parse JSON from response: {content[:200]}") + + def _fallback_diagnosis(self, logs: str) -> dict: + """Rule-based fallback when LLM fails.""" + log_lower = logs.lower() + if "modulenotfounderror" in log_lower or "no module named" in log_lower: + category = "dependency_error" + root_cause = "Missing Python module – dependency not installed" + elif "assertionerror" in log_lower or "failed test" in log_lower or "pytest" in log_lower: + category = "test_failure" + root_cause = "Unit test assertion failed" + elif "syntaxerror" in log_lower or "unexpected token" in log_lower: + category = "build_error" + root_cause = "Syntax error in source code" + elif "permission denied" in log_lower: + category = "permissions_error" + root_cause = "File or resource permission denied" + elif "connection refused" in log_lower or "timeout" in log_lower: + category = "network_error" + root_cause = "Network connection issue" + else: + category = "unknown" + root_cause = "Unknown pipeline failure – manual inspection required" + + return { + "root_cause": root_cause, + "failure_category": category, + "affected_files": [], + "error_lines": [], + "confidence": 0.5, + "summary": f"Automated fallback diagnosis: 
{root_cause}" + } diff --git a/backend/agents/fixer_agent.py b/backend/agents/fixer_agent.py new file mode 100644 index 000000000..49b811ba5 --- /dev/null +++ b/backend/agents/fixer_agent.py @@ -0,0 +1,279 @@ +""" +Fixer Agent – Generates actionable remediation scripts based on diagnosis. +""" +import json +import logging +import re +import time +from google import genai + +from langchain_core.messages import HumanMessage, SystemMessage +from langchain_community.chat_models import ChatOllama +from langchain_mistralai import ChatMistralAI + +from backend.config import settings +from backend.agents.vector_store import VectorStore + +logger = logging.getLogger(__name__) + + +FIXER_SYSTEM_PROMPT = """You are PipeGenie's Fixer Agent — an expert DevOps AI that generates +safe, executable remediation scripts for CI/CD pipeline failures. + +Given a diagnosis, generate a precise fix. Output ONLY valid JSON: +{ + "fix_type": "dependency|config|patch|build|test|permissions|network", + "fix_description": "1-2 sentence human-readable fix description", + "fix_script": "#!/bin/bash\\n# exact commands to fix the issue", + "pre_conditions": ["condition 1 to verify before running"], + "post_conditions": ["what to verify after fix"], + "estimated_risk": 0.2, + "requires_restart": false, + "rollback_script": "#!/bin/bash\\n# commands to undo the fix if needed" +} + +Rules: +- Scripts must be safe and idempotent (can run multiple times safely) +- Use conditional checks before making changes (e.g., `if ! 
command -v xyz; then ...`) +- Never delete production data +- Always include error handling with `set -e` +- Prefer additive changes over replacements +- Only modify files inside the checked-out repository workspace +- Do not use absolute system paths like /etc, /var, /usr +- Do not require sudo; assume CI container context without privileged escalation +- Avoid commands that mutate host runtime outside repository contents""" + + +class FixerAgent: + def __init__(self): + self.vector_store = VectorStore() + self._init_llm() + + def _resolve_provider(self) -> str: + provider = (settings.LLM_PROVIDER or "").strip().lower() + if provider: + return provider + if settings.USE_OLLAMA: + return "ollama" + if settings.MISTRAL_API_KEY: + return "mistral" + return "gemini" + + def _init_llm(self): + self.provider = self._resolve_provider() + self.last_provider_used = self.provider + self.llm = None + self.gemini_client = None + self.ollama_fallback_llm = None + + if self.provider == "gemini": + client_kwargs = {} + if settings.GEMINI_API_KEY: + client_kwargs["api_key"] = settings.GEMINI_API_KEY + self.gemini_client = genai.Client(**client_kwargs) + # Keep a local Ollama fallback so quota/rate limits don't force rule-based fixes. + self.ollama_fallback_llm = ChatOllama( + model=settings.LLM_MODEL, + base_url=settings.OLLAMA_BASE_URL, + temperature=0.2, + format="json" + ) + logger.info(f"[FixerAgent] Using Gemini model: {settings.GEMINI_MODEL}") + elif self.provider == "ollama": + self.llm = ChatOllama( + model=settings.LLM_MODEL, + base_url=settings.OLLAMA_BASE_URL, + temperature=0.2, + format="json" + ) + logger.info(f"[FixerAgent] Using Ollama model: {settings.LLM_MODEL}") + elif self.provider == "mistral": + self.llm = ChatMistralAI( + model=settings.MISTRAL_MODEL, + api_key=settings.MISTRAL_API_KEY, + temperature=0.2 + ) + logger.info(f"[FixerAgent] Using Mistral model: {settings.MISTRAL_MODEL}") + else: + raise ValueError(f"Unsupported LLM_PROVIDER '{self.provider}'. 
Use gemini, ollama, or mistral.") + + def _invoke_with_ollama_fallback(self, system_prompt: str, user_prompt: str, reason: str) -> str | None: + if not self.ollama_fallback_llm: + return None + + messages = [ + SystemMessage(content=system_prompt), + HumanMessage(content=user_prompt), + ] + try: + response = self.ollama_fallback_llm.invoke(messages) + self.last_provider_used = "ollama" + logger.warning( + f"[FixerAgent] Switched to Ollama fallback due to Gemini issue: {reason}" + ) + return response.content if hasattr(response, "content") else str(response) + except Exception as fallback_error: + logger.warning( + f"[FixerAgent] Ollama fallback also failed: {fallback_error}" + ) + return None + + def _invoke_with_prompts(self, system_prompt: str, user_prompt: str) -> str: + self.last_provider_used = self.provider + if self.provider == "gemini": + combined_prompt = ( + f"{system_prompt}\n\n" + f"{user_prompt}\n\n" + "Return ONLY valid JSON." + ) + last_error = None + max_attempts = 3 + for attempt in range(1, max_attempts + 1): + try: + response = self.gemini_client.models.generate_content( + model=settings.GEMINI_MODEL, + contents=combined_prompt, + ) + text = getattr(response, "text", None) + self.last_provider_used = "gemini" + return text if text else str(response) + except Exception as e: + last_error = e + error_text = str(e).upper() + is_quota_limited = ( + "429" in error_text + or "RESOURCE_EXHAUSTED" in error_text + or "QUOTA" in error_text + ) + if is_quota_limited: + fallback_response = self._invoke_with_ollama_fallback( + system_prompt, + user_prompt, + str(e), + ) + if fallback_response: + return fallback_response + + is_retryable = ( + "503" in error_text + or "UNAVAILABLE" in error_text + or "429" in error_text + or "RESOURCE_EXHAUSTED" in error_text + or "TIMEOUT" in error_text + ) + if not is_retryable or attempt == max_attempts: + raise + + backoff_seconds = 0.7 * (2 ** (attempt - 1)) + logger.warning( + f"[FixerAgent] Gemini transient error on 
attempt {attempt}/{max_attempts}: {e}. Retrying in {backoff_seconds:.1f}s" + ) + time.sleep(backoff_seconds) + + if last_error: + raise last_error + + messages = [ + SystemMessage(content=system_prompt), + HumanMessage(content=user_prompt), + ] + response = self.llm.invoke(messages) + self.last_provider_used = self.provider + return response.content if hasattr(response, "content") else str(response) + + async def generate_fix(self, diagnosis: dict, repo: str, branch: str, + raw_logs: str) -> dict: + """Generate a fix plan from diagnosis output.""" + logger.info(f"[FixerAgent] Generating fix for category: {diagnosis.get('failure_category')}") + + # Check if there's a known fix from vector store + similar_fixes = await self.vector_store.search_known_fixes( + diagnosis.get("failure_category", "unknown"), + diagnosis.get("root_cause", ""), + top_k=2 + ) + + similar_context = "" + if similar_fixes: + fixes_text = "\n".join([ + f"Fix #{i+1}: {f.get('fix_description')} → Script: {f.get('fix_script', '')[:200]}" + for i, f in enumerate(similar_fixes) + ]) + similar_context = f"\n=== KNOWN FIXES FOR SIMILAR FAILURES ===\n{fixes_text}" + + user_prompt = f""" +Repository: {repo} +Branch: {branch} + +=== DIAGNOSIS === +Root Cause: {diagnosis.get('root_cause')} +Category: {diagnosis.get('failure_category')} +Affected Files: {', '.join(diagnosis.get('affected_files', []))} +Error Lines: {chr(10).join(diagnosis.get('error_lines', [])[:5])} + +=== RELEVANT LOG SNIPPET === +{raw_logs[-1500:]} +{similar_context} + +Generate a safe, executable fix script for this CI/CD pipeline failure. 
+""" + try: + response_text = self._invoke_with_prompts( + FIXER_SYSTEM_PROMPT, + user_prompt, + ) + result = self._parse_json_response(response_text) + if self.provider == "gemini" and self.last_provider_used == "ollama": + result["fix_description"] = ( + f"Gemini quota/rate limit hit; generated via local Ollama fallback: {result.get('fix_description', '')}".strip() + ) + except Exception as e: + logger.error(f"FixerAgent LLM failed: {e}") + result = self._fallback_fix(diagnosis) + if self.provider == "gemini": + result["fix_description"] = ( + f"Gemini was temporarily unavailable; using fallback fix strategy for {diagnosis.get('failure_category', 'unknown')}" + ) + + logger.info(f"[FixerAgent] Fix type: {result.get('fix_type')}, Risk: {result.get('estimated_risk')}") + return result + + def _parse_json_response(self, content: str) -> dict: + if not isinstance(content, str): + content = str(content) + + try: + return json.loads(content) + except json.JSONDecodeError: + pass + match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", content, re.DOTALL) + if match: + return json.loads(match.group(1)) + match = re.search(r"\{.*\}", content, re.DOTALL) + if match: + return json.loads(match.group(0)) + raise ValueError("Could not parse fix JSON") + + def _fallback_fix(self, diagnosis: dict) -> dict: + """Rule-based fallback fixes for common categories.""" + cat = diagnosis.get("failure_category", "unknown") + + scripts = { + "dependency_error": "#!/bin/bash\nset -e\necho 'Installing dependencies...'\npip install -r requirements.txt\necho 'Done.'", + "test_failure": "#!/bin/bash\nset -e\necho 'Running tests in verbose mode to identify failures...'\npytest --tb=short -v || true", + "build_error": "#!/bin/bash\nset -e\necho 'Cleaning build artifacts...'\nfind . -name '__pycache__' -type d -exec rm -rf {} + 2>/dev/null || true\nfind . -name '*.pyc' -delete 2>/dev/null || true\nnpm run build 2>/dev/null || pip install -e . 
2>/dev/null || echo 'Build cleanup done'", + "permissions_error": "#!/bin/bash\nset -e\necho 'Fixing permissions...'\nchmod +x scripts/*.sh 2>/dev/null || true\nchown -R $(whoami) . 2>/dev/null || true", + "network_error": "#!/bin/bash\nset -e\necho 'Checking network and retrying...'\nfor i in 1 2 3; do\n wget -q --spider https://pypi.org && break\n echo \"Retry $i...\"\n sleep 10\ndone\npip install --retries 5 -r requirements.txt", + } + + script = scripts.get(cat, "#!/bin/bash\necho 'Manual intervention required'\nexit 1") + return { + "fix_type": cat.replace("_error", "") if cat != "unknown" else "manual", + "fix_description": f"Automated fallback fix for {cat}", + "fix_script": script, + "pre_conditions": [], + "post_conditions": [], + "estimated_risk": 0.4, + "requires_restart": False, + "rollback_script": "#!/bin/bash\necho 'No rollback needed for this fix'" + } diff --git a/backend/agents/orchestrator.py b/backend/agents/orchestrator.py new file mode 100644 index 000000000..fe08d5d00 --- /dev/null +++ b/backend/agents/orchestrator.py @@ -0,0 +1,454 @@ +""" +Agent Orchestrator – coordinates Diagnosis → Fixer → Guardian → Executor pipeline. 
+""" +import logging +import asyncio +from datetime import datetime, timedelta, timezone +from opentelemetry import trace + +from backend.agents.diagnosis_agent import DiagnosisAgent +from backend.agents.fixer_agent import FixerAgent +from backend.guardian.risk_evaluator import RiskEvaluator +from backend.executor.docker_runner import DockerRunner +from backend.models.pipeline_event import PipelineEvent, PipelineStatus, FailureCategory +from backend.models.approval_request import ApprovalRequest, ApprovalStatus +from backend.models.fix_record import FixRecord, FixStatus +from backend.services.github_service import GitHubService +from backend.agents.vector_store import VectorStore +from backend.config import settings + +logger = logging.getLogger(__name__) +tracer = trace.get_tracer(__name__) + + +class AgentOrchestrator: + def __init__(self, ws_manager=None): + self.diagnosis_agent = DiagnosisAgent() + self.fixer_agent = FixerAgent() + self.risk_evaluator = RiskEvaluator() + self.docker_runner = DockerRunner() + self.github_service = GitHubService() + self.vector_store = VectorStore() + self.ws_manager = ws_manager # WebSocket broadcast manager + + async def _append_timeline( + self, + event: PipelineEvent, + step: str, + message: str, + status: PipelineStatus | None = None, + details: dict | None = None, + ): + timeline = event.metadata.get("timeline") + if not isinstance(timeline, list): + timeline = [] + + timeline.append({ + "step": step, + "message": message, + "status": (status or event.status).value if hasattr((status or event.status), "value") else str(status or event.status), + "timestamp": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"), + "details": details or {}, + }) + + event.metadata["timeline"] = timeline + event.update_timestamp() + + async def process_failure(self, event: PipelineEvent): + """Main orchestration pipeline for a pipeline failure.""" + logger.info(f"[Orchestrator] Starting processing for event {event.event_id}") + + try: 
+ with tracer.start_as_current_span("orchestrator.process_failure") as process_span: + process_span.set_attribute("pipegenie.event_id", event.event_id) + process_span.set_attribute("pipegenie.repo", event.repo_full_name) + process_span.set_attribute("pipegenie.branch", event.branch) + + # === STEP 1: DIAGNOSIS === + with tracer.start_as_current_span("orchestrator.diagnosis") as diagnosis_span: + event.status = PipelineStatus.DIAGNOSING + await self._append_timeline( + event, + "diagnosis_started", + "Diagnosis started", + details={"repo": event.repo_full_name, "branch": event.branch}, + ) + await event.save() + await self._broadcast("diagnosis_started", event) + + diagnosis = await self.diagnosis_agent.analyze( + event_id=str(event.id), + logs=event.raw_logs, + repo=event.repo_full_name, + branch=event.branch, + commit_message=event.commit_message + ) + + event.root_cause = diagnosis.get("root_cause") + event.log_summary = diagnosis.get("summary", "") + event.failure_category = FailureCategory( + diagnosis.get("failure_category", "unknown") + ) + event.metadata["diagnosis"] = diagnosis + diagnosis_span.set_attribute( + "pipegenie.failure_category", + diagnosis.get("failure_category", "unknown"), + ) + diagnosis_span.set_attribute( + "pipegenie.diagnosis_confidence", + float(diagnosis.get("confidence", 0.0) or 0.0), + ) + await self._append_timeline( + event, + "diagnosis_complete", + "Diagnosis completed", + details={"failure_category": diagnosis.get("failure_category", "unknown")}, + ) + await event.save() + await self._broadcast("diagnosis_complete", event, extra={"diagnosis": diagnosis}) + + # === STEP 2: FIX GENERATION === + with tracer.start_as_current_span("orchestrator.fix_generation") as fix_span: + event.status = PipelineStatus.FIX_PENDING + await self._append_timeline( + event, + "fix_generation_started", + "Fix generation started", + ) + await event.save() + + fix = await self.fixer_agent.generate_fix( + diagnosis=diagnosis, + repo=event.repo_full_name, 
+ branch=event.branch, + raw_logs=event.raw_logs + ) + + event.proposed_fix = fix.get("fix_description") + event.fix_script = fix.get("fix_script") + event.metadata["fix"] = fix + fix_span.set_attribute("pipegenie.fix_type", fix.get("fix_type", "unknown")) + await self._append_timeline( + event, + "fix_generated", + "Fix generated", + details={"fix_type": fix.get("fix_type", "unknown")}, + ) + await event.save() + await self._broadcast("fix_generated", event, extra={"fix": fix}) + + # === STEP 3: GUARDIAN RISK EVALUATION === + with tracer.start_as_current_span("orchestrator.risk_evaluation") as risk_span: + risk = self.risk_evaluator.evaluate( + fix=fix, + diagnosis=diagnosis, + repo=event.repo_full_name, + branch=event.branch + ) + + event.risk_score = risk["score"] + event.risk_level = risk["level"] + event.metadata["risk"] = risk + risk_span.set_attribute("pipegenie.risk_level", risk["level"]) + risk_span.set_attribute("pipegenie.risk_score", float(risk["score"])) + await self._append_timeline( + event, + "risk_evaluated", + "Risk evaluated", + details={"risk_level": risk["level"], "risk_score": risk["score"]}, + ) + await event.save() + await self._broadcast("risk_evaluated", event, extra={"risk": risk}) + + # === STEP 4: ALWAYS REQUIRE HUMAN APPROVAL === + logger.info( + f"[Orchestrator] Routing fix to human approval (risk={risk['level']}, score={risk['score']})" + ) + process_span.set_attribute("pipegenie.final_risk_level", risk["level"]) + process_span.set_attribute("pipegenie.final_risk_score", float(risk["score"])) + await self._request_approval(event, fix, risk) + + except Exception as e: + logger.error(f"[Orchestrator] Pipeline failed for {event.event_id}: {e}", exc_info=True) + event.status = PipelineStatus.FAILED_TO_FIX + event.metadata["error"] = str(e) + event.update_timestamp() + await event.save() + await self._broadcast("processing_failed", event, extra={"error": str(e)}) + + async def _auto_apply_fix( + self, + event: PipelineEvent, + fix: dict, 
+ risk: dict, + auto_applied: bool = True, + approved_by: str | None = None + ): + """Execute fix in Docker and trigger re-run.""" + event.status = PipelineStatus.FIXING + await self._append_timeline( + event, + "fix_execution_started", + "Fix execution started", + details={"approved_by": approved_by, "auto_applied": auto_applied}, + ) + await event.save() + await self._broadcast("fix_applying", event) + + # Execute in Docker + result = await self.docker_runner.run_fix( + fix_script=fix.get("fix_script", ""), + repo_url=f"https://github.com/{event.repo_full_name}", + branch=event.branch, + event_id=str(event.id), + repo_full_name=event.repo_full_name, + ) + + # Save fix record + fix_record = FixRecord( + event_id=str(event.id), + repo_full_name=event.repo_full_name, + fix_type=fix.get("fix_type", "unknown"), + fix_script=fix.get("fix_script", ""), + fix_output=result.get("output", ""), + exit_code=result.get("exit_code", 1), + status=FixStatus.SUCCESS if result.get("exit_code") == 0 else FixStatus.FAILED, + duration_seconds=result.get("duration", 0), + auto_applied=auto_applied, + container_id=result.get("container_id"), + metadata={ + "approved_by": approved_by, + "fix_branch": result.get("fix_branch") + } if approved_by else {"fix_branch": result.get("fix_branch")} + ) + await fix_record.insert() + + if result.get("exit_code") == 0: + event.fix_applied = True + event.fix_output = result.get("output", "") + event.status = PipelineStatus.RETRYING + fix_branch = result.get("fix_branch") + await self._append_timeline( + event, + "fix_execution_succeeded", + "Fix execution succeeded", + details={"fix_branch": fix_branch}, + ) + + # Store fix in vector DB for future use + await self.vector_store.store_fix( + event_id=str(event.id), + failure_category=str(event.failure_category), + root_cause=event.root_cause or "", + fix_data=fix + ) + + if settings.REPO_WRITEBACK_ENABLED and settings.AUTO_OPEN_PR and fix_branch: + pr = await self.github_service.create_pull_request( + 
repo=event.repo_full_name, + head_branch=fix_branch, + base_branch=event.branch, + title=f"PipeGenie fix: {event.workflow_name} failure", + body=( + f"Automated fix generated by PipeGenie for event `{event.event_id}`.\n\n" + f"Root cause: {event.root_cause or 'Unknown'}\n" + f"Risk: {event.risk_level or 'unknown'} ({event.risk_score})" + ) + ) + if pr: + event.metadata["pull_request"] = pr + + # Trigger GitHub Actions re-run when no PR flow is available. + rerun_ok = False + if not event.metadata.get("pull_request"): + rerun_ok = await self.github_service.trigger_rerun( + repo=event.repo_full_name, + run_id=event.event_id + ) + + event.re_run_triggered = rerun_ok + event.status = PipelineStatus.FIXED + await self._append_timeline( + event, + "pipeline_resolved", + "Pipeline resolved", + details={"rerun_triggered": rerun_ok, "pull_request": event.metadata.get("pull_request")}, + ) + else: + event.status = PipelineStatus.FAILED_TO_FIX + event.fix_output = result.get("output", "") + await self._append_timeline( + event, + "fix_execution_failed", + "Fix execution failed", + details={"exit_code": result.get("exit_code", 1)}, + ) + + await event.save() + await self._broadcast("fix_complete", event, extra={"result": result}) + + async def _request_approval(self, event: PipelineEvent, fix: dict, risk: dict): + """Create an approval request for a generated fix.""" + event.status = PipelineStatus.AWAITING_APPROVAL + await self._append_timeline( + event, + "approval_requested", + "Approval requested", + details={"risk_level": risk["level"], "risk_score": risk["score"]}, + ) + await event.save() + + timing = risk.get("timing", {}) if isinstance(risk, dict) else {} + + approval = ApprovalRequest( + event_id=str(event.id), + repo_full_name=event.repo_full_name, + branch=event.branch, + commit_sha=event.commit_sha, + root_cause=event.root_cause or "", + proposed_fix=fix.get("fix_description", ""), + fix_script=fix.get("fix_script", ""), + risk_score=risk["score"], + 
risk_level=risk["level"], + risk_reasons=risk.get("reasons", []), + estimated_duration_seconds=float(timing.get("estimated_seconds", 0.0) or 0.0), + timing_level=timing.get("level", "unknown"), + timing_reasons=timing.get("reasons", []), + expires_at=datetime.utcnow() + timedelta(hours=24) + ) + await approval.insert() + + event.metadata["approval_id"] = str(approval.id) + event.update_timestamp() + await event.save() + await self._broadcast("approval_required", event, extra={ + "approval_id": str(approval.id), + "risk": risk + }) + + async def execute_approved_fix( + self, + approval_id: str, + reviewer: str, + note: str = "", + edited_fix_script: str | None = None, + ): + """Called when a human approves a fix.""" + approval = await ApprovalRequest.get(approval_id) + if not approval or approval.status != ApprovalStatus.PENDING: + raise ValueError("Approval not found or already processed") + + event = await PipelineEvent.get(approval.event_id) + if not event: + raise ValueError("Pipeline event not found") + + fix = dict(event.metadata.get("fix", {}) or {}) + if not fix.get("fix_script"): + fix["fix_script"] = approval.fix_script or "" + if not fix.get("fix_description"): + fix["fix_description"] = approval.proposed_fix or event.proposed_fix or "" + + risk = dict(event.metadata.get("risk", {}) or {}) + + edit_applied = False + if edited_fix_script is not None: + edited_value = edited_fix_script.strip() + if not edited_value: + raise ValueError("Edited script cannot be empty") + + existing_script = (approval.fix_script or "").strip() + if edited_value != existing_script: + fix["fix_script"] = edited_value + diagnosis = event.metadata.get("diagnosis", {}) + if not isinstance(diagnosis, dict): + diagnosis = {} + + risk = self.risk_evaluator.evaluate( + fix=fix, + diagnosis=diagnosis, + repo=event.repo_full_name, + branch=event.branch, + ) + + event.fix_script = edited_value + event.risk_score = risk.get("score") + event.risk_level = risk.get("level") + 
event.metadata["fix"] = fix + event.metadata["risk"] = risk + + approval.fix_script = edited_value + approval.risk_score = risk.get("score", approval.risk_score) + approval.risk_level = risk.get("level", approval.risk_level) + approval.risk_reasons = risk.get("reasons", []) + timing = risk.get("timing", {}) + approval.estimated_duration_seconds = float(timing.get("estimated_seconds", 0.0) or 0.0) + approval.timing_level = timing.get("level", "unknown") + approval.timing_reasons = timing.get("reasons", []) + + edit_applied = True + await self._append_timeline( + event, + "approval_script_edited", + f"Fix script edited by {reviewer}", + details={"reviewer": reviewer, "line_count": len([line for line in edited_value.splitlines() if line.strip()])}, + ) + await event.save() + + approval.status = ApprovalStatus.APPROVED + approval.reviewed_by = reviewer + approval.reviewer_note = note + approval.reviewed_at = datetime.utcnow() + await approval.save() + + await self._append_timeline( + event, + "approval_approved", + f"Approval approved by {reviewer}", + details={"reviewer": reviewer, "note": note, "script_edited": edit_applied}, + ) + await event.save() + await self._auto_apply_fix(event, fix, risk, auto_applied=False, approved_by=reviewer) + + async def reject_fix(self, approval_id: str, reviewer: str, note: str = ""): + """Called when a human rejects a fix.""" + approval = await ApprovalRequest.get(approval_id) + if not approval: + raise ValueError("Approval not found") + + approval.status = ApprovalStatus.REJECTED + approval.reviewed_by = reviewer + approval.reviewer_note = note + approval.reviewed_at = datetime.utcnow() + await approval.save() + + event = await PipelineEvent.get(approval.event_id) + if event: + event.status = PipelineStatus.FAILED_TO_FIX + event.metadata["rejection_note"] = note + await self._append_timeline( + event, + "approval_rejected", + f"Approval rejected by {reviewer}", + details={"reviewer": reviewer, "note": note}, + ) + await event.save() 
+ await self._broadcast("fix_rejected", event) + + async def _broadcast(self, event_type: str, event: PipelineEvent, extra: dict = None): + """Send WebSocket broadcast to all connected clients.""" + if self.ws_manager: + payload = { + "type": event_type, + "event_id": event.event_id, + "repo": event.repo_full_name, + "branch": event.branch, + "status": event.status, + "risk_score": event.risk_score, + "risk_level": event.risk_level, + "root_cause": event.root_cause, + "proposed_fix": event.proposed_fix, + "timestamp": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z") + } + if extra: + payload.update(extra) + await self.ws_manager.broadcast(payload) diff --git a/backend/agents/vector_store.py b/backend/agents/vector_store.py new file mode 100644 index 000000000..bda56e553 --- /dev/null +++ b/backend/agents/vector_store.py @@ -0,0 +1,368 @@ +""" +MilvusDB Vector Store – stores failure patterns and known fixes for fast recall. +""" +import logging +from typing import Optional, List, Dict, Any + +from backend.config import settings + +logger = logging.getLogger(__name__) + +# Embedding dimension (using sentence-transformers default) +EMBEDDING_DIM = 384 + + +class VectorStore: + _instance = None + + def __new__(cls): + if cls._instance is None: + cls._instance = super().__new__(cls) + cls._instance._initialized = False + return cls._instance + + def __init__(self): + if self._initialized: + return + self._initialized = True + try: + # Lazy import pymilvus to avoid pkg_resources issues at module load time + from pymilvus import connections + + # Connect to Milvus + connections.connect( + alias="default", + host=settings.MILVUS_HOST, + port=settings.MILVUS_PORT, + pool_name="default" + ) + logger.info(f"[VectorStore] Connected to Milvus at {settings.MILVUS_HOST}:{settings.MILVUS_PORT}") + + # Initialize collections + self._init_collections() + self.client = connections # Store connection reference + logger.info("[VectorStore] MilvusDB initialized 
successfully") + except Exception as e: + logger.error(f"[VectorStore] Failed to init MilvusDB: {e}") + self.client = None + + def _init_collections(self): + """Initialize Milvus collections for failures and fixes.""" + try: + # Lazy imports for collection initialization + from pymilvus import Collection, FieldSchema, CollectionSchema, DataType, utility + + # Define schema for pipeline_failures collection + failures_fields = [ + FieldSchema(name="id", dtype=DataType.VARCHAR, is_primary=True, max_length=256), + FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=EMBEDDING_DIM), + FieldSchema(name="event_id", dtype=DataType.VARCHAR, max_length=256), + FieldSchema(name="root_cause", dtype=DataType.VARCHAR, max_length=512), + FieldSchema(name="failure_category", dtype=DataType.VARCHAR, max_length=256), + FieldSchema(name="confidence", dtype=DataType.FLOAT), + FieldSchema(name="document", dtype=DataType.VARCHAR, max_length=2048), + ] + failures_schema = CollectionSchema( + fields=failures_fields, + description="Pipeline failure patterns", + enable_dynamic_field=True + ) + + # Create or get failures collection + if utility.has_collection("pipeline_failures", using="default"): + self.failures_collection = Collection( + name="pipeline_failures", + using="default" + ) + else: + self.failures_collection = Collection( + name="pipeline_failures", + schema=failures_schema, + using="default" + ) + # Create index for similarity search + self.failures_collection.create_index( + field_name="embedding", + index_params={ + "metric_type": "COSINE", + "index_type": "IVF_FLAT", + "params": {"nlist": 128} + } + ) + logger.info("[VectorStore] Created pipeline_failures collection") + + # Define schema for known_fixes collection + fixes_fields = [ + FieldSchema(name="id", dtype=DataType.VARCHAR, is_primary=True, max_length=256), + FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=EMBEDDING_DIM), + FieldSchema(name="event_id", dtype=DataType.VARCHAR, max_length=256), 
+ FieldSchema(name="failure_category", dtype=DataType.VARCHAR, max_length=256), + FieldSchema(name="fix_type", dtype=DataType.VARCHAR, max_length=256), + FieldSchema(name="fix_description", dtype=DataType.VARCHAR, max_length=1024), + FieldSchema(name="fix_script", dtype=DataType.VARCHAR, max_length=2048), + FieldSchema(name="document", dtype=DataType.VARCHAR, max_length=2048), + ] + fixes_schema = CollectionSchema( + fields=fixes_fields, + description="Known fixes for failures", + enable_dynamic_field=True + ) + + # Create or get fixes collection + if utility.has_collection("known_fixes", using="default"): + self.fixes_collection = Collection( + name="known_fixes", + using="default" + ) + else: + self.fixes_collection = Collection( + name="known_fixes", + schema=fixes_schema, + using="default" + ) + # Create index for similarity search + self.fixes_collection.create_index( + field_name="embedding", + index_params={ + "metric_type": "COSINE", + "index_type": "IVF_FLAT", + "params": {"nlist": 128} + } + ) + logger.info("[VectorStore] Created known_fixes collection") + + # Collections must be loaded in memory before search. + self.failures_collection.load() + self.fixes_collection.load() + + except Exception as e: + logger.error(f"[VectorStore] Failed to initialize collections: {e}") + raise + + def _get_embedding(self, text: str) -> List[float]: + """Generate embedding for text using a simple approach. + + In production, you would use a proper embedding model. + For now, we'll use basic TF-IDF-like approach or integrate with Ollama. 
+ """ + try: + # Try to use Ollama for embeddings + import httpx + response = httpx.post( + f"{settings.OLLAMA_BASE_URL}/api/embeddings", + json={"model": settings.LLM_MODEL, "prompt": text} + ) + if response.status_code == 200: + return response.json()["embedding"] + except Exception as e: + logger.debug(f"[VectorStore] Ollama embedding failed: {e}") + + # Fallback: create a simple deterministic embedding + # In production, replace with proper embedding model + hash_val = hash(text) + import random + random.seed(abs(hash_val)) + return [random.random() for _ in range(EMBEDDING_DIM)] + + def _normalize_embedding(self, embedding: List[float]) -> List[float]: + """Ensure embedding matches Milvus schema dimension and float type.""" + if not embedding: + return [0.0] * EMBEDDING_DIM + + normalized = [float(x) for x in embedding] + if len(normalized) > EMBEDDING_DIM: + return normalized[:EMBEDDING_DIM] + if len(normalized) < EMBEDDING_DIM: + return normalized + [0.0] * (EMBEDDING_DIM - len(normalized)) + return normalized + + def _get_hit_value(self, hit: Any, key: str, default: Any = "") -> Any: + """Read a field value from pymilvus search hit across SDK variants.""" + try: + value = hit.get(key) + return default if value is None else value + except Exception: + pass + + try: + value = hit.entity.get(key) + return default if value is None else value + except Exception: + return default + + async def store_failure(self, event_id: str, logs_summary: str, diagnosis: dict): + """Store a failure pattern for future similarity search.""" + if not self.client: + return + try: + # Generate embedding + embedding = self._normalize_embedding(self._get_embedding(logs_summary)) + + # Prepare data + entry_id = f"failure_{event_id}" + data = [ + [entry_id], + [embedding], + [event_id], + [diagnosis.get("root_cause", "")], + [diagnosis.get("failure_category", "")], + [float(diagnosis.get("confidence", 0.5))], + [logs_summary], + ] + + # Insert data + 
self.failures_collection.insert(data) + self.failures_collection.flush() + + logger.debug(f"[VectorStore] Stored failure {entry_id}") + except Exception as e: + logger.warning(f"[VectorStore] Store failure error: {e}") + + async def store_fix(self, event_id: str, failure_category: str, + root_cause: str, fix_data: dict): + """Store a successful fix for future recall.""" + if not self.client: + return + try: + # Generate embedding + doc = f"{failure_category}: {root_cause}" + embedding = self._normalize_embedding(self._get_embedding(doc)) + + # Prepare data + entry_id = f"fix_{event_id}" + data = [ + [entry_id], + [embedding], + [event_id], + [failure_category], + [fix_data.get("fix_type", "")], + [fix_data.get("fix_description", "")], + [fix_data.get("fix_script", "")[:500]], + [doc], + ] + + # Insert data + self.fixes_collection.insert(data) + self.fixes_collection.flush() + + logger.debug(f"[VectorStore] Stored fix {entry_id}") + except Exception as e: + logger.warning(f"[VectorStore] Store fix error: {e}") + + async def search_similar_failures(self, logs: str, top_k: int = 3) -> list: + """Find similar past failures.""" + if not self.client: + return [] + query_embedding = self._normalize_embedding(self._get_embedding(logs[:500])) + try: + self.failures_collection.load() + + # Search + search_params = {"metric_type": "COSINE", "params": {"nprobe": 10}} + results = self.failures_collection.search( + data=[query_embedding], + anns_field="embedding", + param=search_params, + limit=top_k, + output_fields=["event_id", "root_cause", "failure_category", "confidence"] + ) + + # Format results + metadatas = [] + if results and len(results) > 0: + for hit in results[0]: + metadatas.append({ + "event_id": self._get_hit_value(hit, "event_id", ""), + "root_cause": self._get_hit_value(hit, "root_cause", ""), + "failure_category": self._get_hit_value(hit, "failure_category", ""), + "confidence": str(self._get_hit_value(hit, "confidence", 0)) + }) + + return metadatas + except 
Exception as e: + if "collection not loaded" in str(e).lower(): + try: + self.failures_collection.load() + search_params = {"metric_type": "COSINE", "params": {"nprobe": 10}} + results = self.failures_collection.search( + data=[query_embedding], + anns_field="embedding", + param=search_params, + limit=top_k, + output_fields=["event_id", "root_cause", "failure_category", "confidence"] + ) + metadatas = [] + if results and len(results) > 0: + for hit in results[0]: + metadatas.append({ + "event_id": self._get_hit_value(hit, "event_id", ""), + "root_cause": self._get_hit_value(hit, "root_cause", ""), + "failure_category": self._get_hit_value(hit, "failure_category", ""), + "confidence": str(self._get_hit_value(hit, "confidence", 0)) + }) + return metadatas + except Exception: + pass + logger.warning(f"[VectorStore] Search failures error: {e}") + return [] + + async def search_known_fixes(self, failure_category: str, root_cause: str, + top_k: int = 2) -> list: + """Find known fixes for a category.""" + if not self.client: + return [] + query = f"{failure_category}: {root_cause}" + query_embedding = self._normalize_embedding(self._get_embedding(query)) + try: + self.fixes_collection.load() + + # Search + search_params = {"metric_type": "COSINE", "params": {"nprobe": 10}} + results = self.fixes_collection.search( + data=[query_embedding], + anns_field="embedding", + param=search_params, + limit=top_k, + output_fields=["event_id", "failure_category", "fix_type", "fix_description", "fix_script"] + ) + + # Format results + metadatas = [] + if results and len(results) > 0: + for hit in results[0]: + metadatas.append({ + "event_id": self._get_hit_value(hit, "event_id", ""), + "failure_category": self._get_hit_value(hit, "failure_category", ""), + "fix_type": self._get_hit_value(hit, "fix_type", ""), + "fix_description": self._get_hit_value(hit, "fix_description", ""), + "fix_script": self._get_hit_value(hit, "fix_script", "") + }) + + return metadatas + except Exception as e: 
+ if "collection not loaded" in str(e).lower(): + try: + self.fixes_collection.load() + search_params = {"metric_type": "COSINE", "params": {"nprobe": 10}} + results = self.fixes_collection.search( + data=[query_embedding], + anns_field="embedding", + param=search_params, + limit=top_k, + output_fields=["event_id", "failure_category", "fix_type", "fix_description", "fix_script"] + ) + metadatas = [] + if results and len(results) > 0: + for hit in results[0]: + metadatas.append({ + "event_id": self._get_hit_value(hit, "event_id", ""), + "failure_category": self._get_hit_value(hit, "failure_category", ""), + "fix_type": self._get_hit_value(hit, "fix_type", ""), + "fix_description": self._get_hit_value(hit, "fix_description", ""), + "fix_script": self._get_hit_value(hit, "fix_script", "") + }) + return metadatas + except Exception: + pass + logger.warning(f"[VectorStore] Search fixes error: {e}") + return [] diff --git a/backend/config.py b/backend/config.py new file mode 100644 index 000000000..d8024d3a8 --- /dev/null +++ b/backend/config.py @@ -0,0 +1,65 @@ +import os +from pydantic_settings import BaseSettings +from typing import Optional + +class Settings(BaseSettings): + # App + APP_NAME: str = "PipeGenie" + APP_VERSION: str = "1.0.0" + DEBUG: bool = True + SECRET_KEY: str = "pipegenie-super-secret-key-change-in-prod" + + # MongoDB + MONGODB_URL: str #= "mongodb://localhost:27017" + MONGODB_DB: str = "pipegenie" + + # Redis + REDIS_URL: str = "redis://localhost:6379" + REDIS_TTL: int = 3600 # 1 hour cache + + # GitHub + GITHUB_TOKEN: str = "" + GITHUB_WEBHOOK_SECRET: str = "pipegenie-webhook-secret" + REPO_WRITEBACK_ENABLED: bool = True + AUTO_OPEN_PR: bool = True + PIPEGENIE_BOT_NAME: str = "PipeGenie Bot" + PIPEGENIE_BOT_EMAIL: str = "pipegenie-bot@users.noreply.github.com" + + # AI / LLM provider + LLM_PROVIDER: str = "gemini" # gemini (default) | ollama | mistral + GEMINI_API_KEY: str = "" + GEMINI_MODEL: str = "gemini-2.5-flash" + + # Legacy/manual options 
(kept for compatibility) + MISTRAL_API_KEY: str = "" # Manual fallback provider + MISTRAL_MODEL: str = "mistral-large-latest" + OLLAMA_BASE_URL: str = "http://localhost:11434" + USE_OLLAMA: bool = False # Legacy switch; prefer LLM_PROVIDER + LLM_MODEL: str = "mistral" # Ollama model name when LLM_PROVIDER=ollama + + # MilvusDB + MILVUS_HOST: str = "localhost" + MILVUS_PORT: int = 19530 + + # Docker + DOCKER_NETWORK: str = "pipegenie-net" + + # Risk thresholds + RISK_LOW_THRESHOLD: float = 0.3 + RISK_HIGH_THRESHOLD: float = 0.7 + + # CORS + FRONTEND_URL: str = "http://localhost:5173" + + # Observability (SigNoz / OpenTelemetry) + OTEL_ENABLED: bool = True + OTEL_SERVICE_NAME: str = "pipegenie-backend" + OTEL_EXPORTER_OTLP_ENDPOINT: str = "http://127.0.0.1:4318/v1/traces" + OTEL_EXPORTER_OTLP_INSECURE: bool = True + OTEL_RESOURCE_ATTRIBUTES: str = "service.namespace=pipegenie,deployment.environment=dev" + + class Config: + env_file = ".env" + extra = "ignore" + +settings = Settings() diff --git a/backend/executor/__init__.py b/backend/executor/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/backend/executor/docker_runner.py b/backend/executor/docker_runner.py new file mode 100644 index 000000000..b552a7253 --- /dev/null +++ b/backend/executor/docker_runner.py @@ -0,0 +1,248 @@ +""" +Docker Runner – Executes fix scripts inside isolated Docker containers. 
+""" +import asyncio +import logging +import time +import os +import re +import docker +from docker.errors import DockerException + +from backend.config import settings + +logger = logging.getLogger(__name__) + + +FIX_CONTAINER_IMAGE = "pipegenie-runner:v2" + + +class DockerRunner: + def __init__(self): + try: + self.client = docker.from_env() + self._ensure_image() + except DockerException as e: + logger.warning(f"[DockerRunner] Docker not available: {e}") + self.client = None + + def _ensure_image(self): + """Build the fix runner image if not present.""" + try: + self.client.images.get(FIX_CONTAINER_IMAGE) + logger.info("[DockerRunner] Runner image found") + except Exception: + logger.info("[DockerRunner] Building runner image...") + dockerfile_path = os.path.join( + os.path.dirname(__file__), "..", "..", "docker", "fix_runner" + ) + if os.path.exists(dockerfile_path): + self.client.images.build( + path=os.path.abspath(dockerfile_path), + tag=FIX_CONTAINER_IMAGE, + rm=True + ) + else: + logger.warning("[DockerRunner] Dockerfile not found, using base image") + + async def run_fix(self, fix_script: str, repo_url: str, + branch: str, event_id: str, repo_full_name: str) -> dict: + """ + Execute a fix script inside a Docker container. + Returns: {"exit_code": int, "output": str, "duration": float, "container_id": str} + """ + if not self.client: + return await self._simulate_execution(fix_script) + + start_time = time.time() + container_id = None + fix_branch = self._build_fix_branch(event_id) + + try: + if settings.REPO_WRITEBACK_ENABLED: + # Execute generated fix script against a cloned repository checkout. 
+ script_content = f"""#!/bin/bash +set -e +echo "=== PipeGenie Fix Runner ===" +echo "Event: {event_id}" +echo "Repo: {repo_url}" +echo "Branch: {branch}" +echo "Fix Branch: {fix_branch}" +echo "================================" + +if [ -z "$REPO_FULL_NAME" ]; then + echo "Missing REPO_FULL_NAME" + exit 1 +fi + +AUTH_REPO_URL="https://github.com/$REPO_FULL_NAME.git" +if [ -n "$GITHUB_TOKEN" ]; then + AUTH_REPO_URL="https://x-access-token:$GITHUB_TOKEN@github.com/$REPO_FULL_NAME.git" +fi + +rm -rf /workspace/repo +if ! git clone --depth 1 --branch "$BRANCH" "$AUTH_REPO_URL" /workspace/repo; then + echo "Branch '$BRANCH' not found on remote. Falling back to repository default branch." + rm -rf /workspace/repo + git clone --depth 1 "$AUTH_REPO_URL" /workspace/repo +fi +cd /workspace/repo + +git config user.name "$PIPEGENIE_BOT_NAME" +git config user.email "$PIPEGENIE_BOT_EMAIL" +git checkout -b "$FIX_BRANCH" + +{fix_script} + +if git diff --quiet && git diff --cached --quiet; then + echo "No repository changes detected after fix script" + echo "PIPEGENIE_FIX_BRANCH=$FIX_BRANCH" + exit 0 +fi + +git add -A +git commit -m "chore(pipegenie): apply generated fix for event {event_id}" +git push origin "$FIX_BRANCH" + +echo "PIPEGENIE_FIX_BRANCH=$FIX_BRANCH" + +echo "=== Fix Script Completed ===" +""" + else: + script_content = f"""#!/bin/bash +set -e +echo "=== PipeGenie Script-Only Runner ===" +echo "Event: {event_id}" +echo "Repo: {repo_url}" +echo "Branch: {branch}" +echo "================================" + +{fix_script} + +echo "=== Fix Script Completed ===" +""" + + script_content = self._sanitize_script(script_content) + + use_runner_image = self._image_exists() + image_name = FIX_CONTAINER_IMAGE if use_runner_image else "python:3.11-slim" + # pipegenie-runner image sets ENTRYPOINT to /bin/bash, so pass only args. 
+ command = ["-c", script_content] if use_runner_image else ["bash", "-c", script_content] + + # Run container + container = self.client.containers.run( + image=image_name, + command=command, + environment={ + "REPO_URL": repo_url, + "REPO_FULL_NAME": repo_full_name, + "BRANCH": branch, + "FIX_BRANCH": fix_branch, + "EVENT_ID": event_id, + "PYTHONUNBUFFERED": "1", + "GITHUB_TOKEN": settings.GITHUB_TOKEN, + "PIPEGENIE_BOT_NAME": settings.PIPEGENIE_BOT_NAME, + "PIPEGENIE_BOT_EMAIL": settings.PIPEGENIE_BOT_EMAIL, + }, + mem_limit="512m", + cpu_period=100000, + cpu_quota=50000, # 50% CPU + network_mode="bridge", + remove=True, + detach=False, + stdout=True, + stderr=True + ) + + duration = time.time() - start_time + output = container.decode("utf-8") if isinstance(container, bytes) else str(container) + parsed_fix_branch = self._extract_fix_branch(output) or (fix_branch if settings.REPO_WRITEBACK_ENABLED else None) + + return { + "exit_code": 0, + "output": output, + "duration": duration, + "container_id": container_id or "completed", + "fix_branch": parsed_fix_branch, + } + + except docker.errors.ContainerError as e: + duration = time.time() - start_time + stderr = e.stderr.decode("utf-8") if e.stderr else str(e) + parsed_fix_branch = self._extract_fix_branch(stderr) or (fix_branch if settings.REPO_WRITEBACK_ENABLED else None) + return { + "exit_code": e.exit_status, + "output": stderr, + "duration": duration, + "container_id": container_id or "failed", + "fix_branch": parsed_fix_branch, + } + except Exception as e: + logger.error(f"[DockerRunner] Unexpected error: {e}") + return { + "exit_code": 1, + "output": f"Docker execution failed: {str(e)}", + "duration": time.time() - start_time, + "container_id": "error", + "fix_branch": fix_branch if settings.REPO_WRITEBACK_ENABLED else None, + } + + def _build_fix_branch(self, event_id: str) -> str: + safe = re.sub(r"[^a-zA-Z0-9-]", "-", event_id)[:24] + return f"pipegenie/fix-{safe}" + + def _extract_fix_branch(self, 
output: str) -> str | None: + match = re.search(r"PIPEGENIE_FIX_BRANCH=([^\s]+)", output or "") + return match.group(1) if match else None + + def _sanitize_script(self, script: str) -> str: + """Fix known shell-command incompatibilities in model-generated scripts.""" + sanitized = script + replacements = { + "--max-redirects": "--max-redirs", + } + + for old, new in replacements.items(): + if old in sanitized: + sanitized = sanitized.replace(old, new) + logger.warning( + f"[DockerRunner] Replaced unsupported flag '{old}' with '{new}'" + ) + + # The runner container already executes as root; sudo often fails because it is absent. + if "sudo " in sanitized: + sanitized = re.sub(r"\bsudo\s+", "", sanitized) + logger.warning("[DockerRunner] Removed 'sudo' from generated script for container execution") + + return sanitized + + def _image_exists(self) -> bool: + try: + self.client.images.get(FIX_CONTAINER_IMAGE) + return True + except Exception: + return False + + async def _simulate_execution(self, fix_script: str) -> dict: + """Simulate execution when Docker is not available (dev mode).""" + logger.warning("[DockerRunner] Simulating Docker execution (Docker not available)") + await asyncio.sleep(2) # Simulate execution time + + # Basic script validation + dangerous = ["rm -rf /", "DROP TABLE", "DELETE FROM"] + for d in dangerous: + if d in fix_script: + return { + "exit_code": 1, + "output": f"BLOCKED: Dangerous command detected: {d}", + "duration": 2.0, + "container_id": "simulated-blocked" + } + + return { + "exit_code": 0, + "output": f"[SIMULATED] Fix script executed successfully:\n{fix_script[:300]}\n[DONE]", + "duration": 2.0, + "container_id": "simulated-success", + "fix_branch": "pipegenie/fix-simulated" + } diff --git a/backend/guardian/__init__.py b/backend/guardian/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/backend/guardian/risk_evaluator.py b/backend/guardian/risk_evaluator.py new file mode 100644 index 000000000..23cb82e1b 
--- /dev/null
+++ b/backend/guardian/risk_evaluator.py
@@ -0,0 +1,249 @@
"""
Guardian – Risk Evaluator Module
Scores the risk of a proposed fix using multi-factor analysis.
"""
import logging
import re
from typing import List

from backend.config import settings

logger = logging.getLogger(__name__)


class RiskEvaluator:
    """
    Multi-factor risk scoring engine.
    Score range: 0.0 (safe) → 1.0 (dangerous)

    Combines five weighted signals: script pattern analysis (40%),
    branch risk (30%), fix-type risk (20%), script complexity (10%),
    plus additive timing penalties, protected-branch guardrails, and an
    LLM-estimated-risk floor. Matching is substring/regex based and
    case-insensitive.
    """

    # Dangerous patterns in fix scripts.
    # Each entry is (regex, additive weight, human-readable reason).
    DANGEROUS_PATTERNS = [
        (r"\brm\s+-rf\s+/", 0.9, "Attempts to delete root filesystem"),
        (r"\brm\s+-rf\s+\.", 0.6, "Attempts to delete entire current directory"),
        (r"sudo\s+rm", 0.5, "Uses sudo to delete files"),
        (r"\bsudo\b", 0.25, "Uses sudo privilege escalation"),
        (r"/(etc|var|usr|opt|root)/", 0.25, "Touches host-level system paths"),
        (r"DROP\s+TABLE", 0.8, "Attempts to drop database table"),
        (r"DROP\s+DATABASE", 0.9, "Attempts to drop entire database"),
        (r"ALTER\s+TABLE.*DROP", 0.6, "Attempts to drop database column"),
        (r"\btruncate\b", 0.7, "Attempts to truncate table data"),
        (r"chmod\s+777", 0.5, "Sets insecure world-writable permissions"),
        (r"curl.*\|\s*bash", 0.7, "Pipes remote script directly to bash"),
        (r"wget.*\|\s*sh", 0.7, "Pipes remote script directly to sh"),
        (r"git\s+push.*--force", 0.6, "Force pushes to repository"),
        (r"git\s+reset\s+--hard\s+HEAD~", 0.5, "Hard resets Git history"),
        (r"kubectl\s+delete\s+namespace", 0.9, "Deletes Kubernetes namespace"),
        (r"kubectl\s+delete\s+all", 0.8, "Deletes all Kubernetes resources"),
        (r"aws\s+s3\s+rm\s+--recursive", 0.7, "Recursively deletes S3 bucket"),
        (r"eval\s+\$", 0.6, "Uses eval with variable (injection risk)"),
        (r":\s*\(\)\s*\{", 0.9, "Fork bomb pattern detected"),
    ]

    # Patterns with NEGATIVE weights that offset the dangerous score slightly.
    SAFE_PATTERNS = [
        (r"pip\s+install", -0.1, "Safe: pip install"),
        (r"npm\s+install", -0.1, "Safe: npm install"),
        (r"apt-get\s+install\s+-y", -0.05, "Safe: apt-get install"),
        (r"set\s+-e", -0.05, "Good: exits on error"),
        (r"if\s+\[.*\];\s*then", -0.05, "Good: conditional checks present"),
        (r"echo\s+", -0.02, "Safe: echo statements"),
    ]

    # Substring → risk weight for the target branch name (first match wins;
    # values >= 0.2 mark the branch as "protected" in evaluate()).
    BRANCH_RISK = {
        "main": 0.2,
        "master": 0.2,
        "production": 0.25,
        "prod": 0.25,
        "release": 0.15,
        "develop": 0.05,
        "dev": 0.05,
        "staging": 0.1,
        "feature": 0.0,
        "fix": 0.0,
        "hotfix": 0.1,
    }

    # Base risk per declared fix type; unknown types default to 0.3 in evaluate().
    FIX_TYPE_RISK = {
        "dependency": 0.1,
        "config": 0.2,
        "patch": 0.15,
        "build": 0.1,
        "test": 0.05,
        "permissions": 0.2,
        "network": 0.1,
        "manual": 0.4,
    }

    # Commands that are usually valid but can still impact runtime behavior/state.
    OPERATIONAL_PATTERNS = [
        (r"\bsed\b.*\s-i\b", 0.18, "Performs in-place file modifications"),
        (r"\b(tee|cat)\b.*>\s*[^>\n]+", 0.14, "Writes configuration/content into files"),
        (r"\bmv\b\s+[^\n]+", 0.1, "Moves or renames files"),
        (r"\bcp\b\s+[^\n]+", 0.08, "Copies files in repository"),
        (r"\b(git\s+checkout|git\s+commit)\b", 0.14, "Mutates repository state"),
        (r"\b(docker\s+compose|docker\s+build|docker\s+run)\b", 0.12, "Touches container build/runtime behavior"),
        (r"\b(terraform\s+apply|kubectl\s+apply)\b", 0.25, "Applies infra/environment changes"),
    ]

    # Each entry is (regex, estimated seconds added, reason) used by _estimate_timing.
    TIMING_PATTERNS = [
        (r"\b(pip|pip3)\s+install\b", 18.0, "Python dependency installation"),
        (r"\bnpm\s+(ci|install)\b", 24.0, "Node dependency installation"),
        (r"\bapt(-get)?\s+install\b", 20.0, "OS package installation"),
        (r"\b(pytest|npm\s+test|go\s+test)\b", 14.0, "Test execution"),
        (r"\b(npm\s+run\s+build|mvn\s+package|gradle\s+build)\b", 26.0, "Build step execution"),
        (r"\b(curl|wget|git\s+clone)\b", 12.0, "Network/download operation"),
        (r"\bsleep\b", 10.0, "Explicit waiting in script"),
    ]

    def _estimate_timing(self, script: str) -> dict:
        """Heuristically estimate how long the fix script will run.

        Baseline grows with the number of non-empty lines (capped), then each
        matched TIMING_PATTERNS entry adds a fixed cost. The final estimate is
        clamped to [5, 360] seconds and bucketed into fast/moderate/slow.

        Returns a dict with "estimated_seconds" (float), "level" (str),
        and "reasons" (list of str).
        """
        non_empty_lines = [line for line in script.split("\n") if line.strip()]
        estimated_seconds = 6.0 + min(len(non_empty_lines) * 1.4, 50.0)
        reasons = [f"Baseline from {len(non_empty_lines)} executable lines"]

        for pattern, delta_seconds, reason in self.TIMING_PATTERNS:
            if re.search(pattern, script, re.IGNORECASE):
                estimated_seconds += delta_seconds
                reasons.append(f"+{int(delta_seconds)}s from {reason}")

        estimated_seconds = round(min(max(estimated_seconds, 5.0), 360.0), 1)
        if estimated_seconds <= 20:
            level = "fast"
        elif estimated_seconds <= 60:
            level = "moderate"
        else:
            level = "slow"

        return {
            "estimated_seconds": estimated_seconds,
            "level": level,
            "reasons": reasons,
        }

    def evaluate(self, fix: dict, diagnosis: dict, repo: str, branch: str) -> dict:
        """
        Score the risk of a proposed fix.

        `diagnosis` and `repo` are accepted for interface stability but are
        not used by the current scoring rules.

        Return a risk report:
        {
            "score": 0.0-1.0,
            "level": "low|medium|high",
            "reasons": [...],
            "breakdown": {...},
            "timing": {...},        # output of _estimate_timing
            "auto_approve": bool    # True for low/medium risk
        }
        """
        reasons: List[str] = []
        breakdown = {}
        total_score = 0.0

        script = fix.get("fix_script", "")
        script_lower = script.lower()

        # 1. Script pattern analysis
        script_score = 0.0
        for pattern, weight, reason in self.DANGEROUS_PATTERNS:
            if re.search(pattern, script, re.IGNORECASE):
                script_score += weight
                reasons.append(f"⚠️ {reason}")

        for pattern, weight, reason in self.OPERATIONAL_PATTERNS:
            if re.search(pattern, script, re.IGNORECASE):
                script_score += weight
                reasons.append(f"⚠️ {reason}")

        for pattern, weight, reason in self.SAFE_PATTERNS:
            if re.search(pattern, script, re.IGNORECASE):
                script_score += weight  # weight is negative here

        # Clamp before weighting so safe-pattern offsets cannot push it below 0.
        script_score = max(0.0, min(script_score, 1.0))
        breakdown["script_analysis"] = script_score
        total_score += script_score * 0.4  # 40% weight

        # 2. Branch risk — substring match against the branch name; the first
        # BRANCH_RISK key found wins, so e.g. "production" matches before "prod".
        branch_lower = branch.lower()
        branch_risk = 0.05  # default
        for key, risk in self.BRANCH_RISK.items():
            if key in branch_lower:
                branch_risk = risk
                if risk >= 0.2:
                    reasons.append(f"⚠️ Targeting protected branch: {branch}")
                break
        protected_branch = branch_risk >= 0.2
        breakdown["branch_risk"] = branch_risk
        total_score += branch_risk * 0.3  # 30% weight

        # 3. Fix type risk
        fix_type = fix.get("fix_type", "manual").lower()
        fix_type_risk = self.FIX_TYPE_RISK.get(fix_type, 0.3)
        breakdown["fix_type_risk"] = fix_type_risk
        total_score += fix_type_risk * 0.2  # 20% weight

        # 4. Script complexity (longer = riskier)
        lines = len([l for l in script.split("\n") if l.strip()])
        complexity_score = min(lines / 50, 0.5)  # max 0.5 for very long scripts
        breakdown["complexity"] = complexity_score
        breakdown["line_count"] = lines
        total_score += complexity_score * 0.1  # 10% weight

        # 5. Execution timing estimate (long-running scripts increase execution exposure).
        timing = self._estimate_timing(script)
        breakdown["estimated_duration_seconds"] = timing["estimated_seconds"]
        if timing["level"] == "moderate":
            total_score += 0.03
            reasons.append("⚠️ Estimated execution time is moderate")
        elif timing["level"] == "slow":
            total_score += 0.08
            reasons.append("⚠️ Estimated execution time is slow")

        # 6. Guardrail for protected branches with scripts that mutate runtime or dependencies.
        # These raise the FLOOR of the score (max), they do not add to it.
        protected_branch_runtime_ops = any(
            token in script_lower
            for token in ("pip install", "npm install", "apt-get install", "curl", "wget", "sed -i", "poetry ")
        )
        protected_branch_privileged_ops = any(
            token in script_lower
            for token in ("sudo ", "/etc/", "/var/", "/usr/", "/opt/", "/root/")
        )
        if protected_branch and protected_branch_runtime_ops:
            total_score = max(total_score, 0.34)
            reasons.append("⚠️ Protected branch + runtime-impacting changes require closer review")
        if protected_branch and protected_branch_privileged_ops:
            total_score = max(total_score, 0.4)
            reasons.append("⚠️ Protected branch + privileged/system-path changes are medium+ risk")

        # 7. LLM-estimated risk as override check — also a floor, scaled down
        # (0.7x / 0.8x) so the agent's own estimate cannot fully dominate.
        llm_risk_raw = fix.get("estimated_risk", 0.3)
        try:
            llm_risk = float(llm_risk_raw)
        except (TypeError, ValueError):
            llm_risk = 0.3
        llm_risk = min(max(llm_risk, 0.0), 1.0)

        if llm_risk > 0.5:
            reasons.append(f"⚠️ AI agent estimated elevated risk ({llm_risk:.2f})")
            total_score = max(total_score, llm_risk * 0.7)
        if llm_risk > 0.7:
            reasons.append(f"⚠️ AI agent flagged high estimated risk ({llm_risk:.2f})")
            total_score = max(total_score, llm_risk * 0.8)

        # Clamp final score
        final_score = round(min(max(total_score, 0.0), 1.0), 3)

        # Determine level using configured thresholds.
        if final_score <= settings.RISK_LOW_THRESHOLD:
            level = "low"
        elif final_score <= settings.RISK_HIGH_THRESHOLD:
            level = "medium"
        else:
            level = "high"

        if not reasons:
            reasons.append("✅ No dangerous patterns detected")

        logger.info(f"[Guardian] Risk score: {final_score} ({level}) | Reasons: {len(reasons)}")

        return {
            "score": final_score,
            "level": level,
            "reasons": reasons,
            "breakdown": breakdown,
            "timing": timing,
            "auto_approve": level in ("low", "medium")
        }
diff --git a/backend/main.py b/backend/main.py
new file mode 100644
index 000000000..86f4adeb0
--- /dev/null
+++ b/backend/main.py
@@ -0,0 +1,125 @@
"""
PipeGenie – FastAPI Backend Entry Point
"""
import logging
from contextlib import asynccontextmanager

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from motor.motor_asyncio import AsyncIOMotorClient
from beanie import init_beanie
import redis.asyncio as redis

from backend.config import settings
from backend.models.pipeline_event import PipelineEvent
from backend.models.approval_request import ApprovalRequest
from backend.models.fix_record import FixRecord
from backend.routes import webhook, approvals, dashboard
from backend.agents.orchestrator import AgentOrchestrator
from backend.services.websocket_manager import WebSocketManager
from backend.observability import setup_observability
from backend.observability import setup_observability

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s"
)
logger = logging.getLogger(__name__)


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Startup and shutdown lifecycle."""
    logger.info("🧞 PipeGenie starting up...")

    # Init MongoDB — Beanie binds the document models to their collections.
    client = AsyncIOMotorClient(settings.MONGODB_URL)
    await init_beanie(
        database=client[settings.MONGODB_DB],
        document_models=[PipelineEvent, ApprovalRequest, FixRecord]
    )
    logger.info(f"✅ MongoDB connected: {settings.MONGODB_DB}")

    # Init Redis (optional) — failure is non-fatal, app runs without cache.
    try:
        redis_client = redis.from_url(settings.REDIS_URL)
        await redis_client.ping()
        app.state.redis = redis_client
        logger.info("✅ Redis connected")
    except Exception as e:
        logger.warning(f"⚠️ Redis not available: {e} (continuing without cache)")
        app.state.redis = None

    # Init WebSocket manager
    ws_manager = WebSocketManager()
    app.state.ws_manager = ws_manager

    # Init Agent Orchestrator
    orchestrator = AgentOrchestrator(ws_manager=ws_manager)
    app.state.orchestrator = orchestrator
    logger.info("✅ Agent Orchestrator ready")

    logger.info("🚀 PipeGenie is live!")
    yield

    # Shutdown
    client.close()
    if app.state.redis:
        await app.state.redis.close()
    logger.info("👋 PipeGenie shut down")


app = FastAPI(
    title="PipeGenie API",
    description="AI-powered CI/CD pipeline auto-remediation system",
    version=settings.APP_VERSION,
    lifespan=lifespan
)
# Tracing must be wired before requests arrive; instruments the app in place.
setup_observability(app)

# CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=[settings.FRONTEND_URL, "http://localhost:3000", "http://localhost:5173"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"]
)

# Register routes
app.include_router(webhook.router, prefix="/api")
app.include_router(approvals.router, prefix="/api")
app.include_router(dashboard.router, prefix="/api")


@app.get("/")
async def root():
    """Service banner with version and docs location."""
    return {
        "name": "PipeGenie",
        "version": settings.APP_VERSION,
        "status": "running",
        "docs": "/docs"
    }


@app.get("/health")
async def health():
    """Health probe reporting DB/Redis status and the active LLM configuration."""
    llm_provider = (settings.LLM_PROVIDER or "").strip().lower()
    # Map provider name to its configured model; unknown providers fall back
    # to the Gemini model (gemini is also the default provider).
    llm_model = {
        "gemini": settings.GEMINI_MODEL,
        "mistral": settings.MISTRAL_MODEL,
        "ollama": settings.LLM_MODEL,
    }.get(llm_provider, settings.GEMINI_MODEL)

    return {
        "status": "healthy",
        "mongodb": "connected",
        "redis": "connected" if app.state.redis else "unavailable",
        "llm_provider": llm_provider or "gemini",
        "llm_model": llm_model,
        "llm_endpoint": settings.OLLAMA_BASE_URL if llm_provider == "ollama" else "managed-api",
    }


if __name__ == "__main__":
    import uvicorn
    uvicorn.run("backend.main:app", host="0.0.0.0", port=8000, reload=True)
diff --git a/backend/models/__init__.py b/backend/models/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/backend/models/approval_request.py b/backend/models/approval_request.py
new file mode 100644
index 000000000..812af9019
--- /dev/null
+++ b/backend/models/approval_request.py
@@ -0,0 +1,37 @@
from beanie import Document
from pydantic import Field
from datetime import datetime
from typing import Optional
from enum import Enum


class ApprovalStatus(str, Enum):
    # Lifecycle of a human-in-the-loop approval request.
    PENDING = "pending"
    APPROVED = "approved"
    REJECTED = "rejected"
    EXPIRED = "expired"


class ApprovalRequest(Document):
    """Persisted approval request for a high-risk fix (collection: approval_requests)."""
    event_id: str = Field(..., description="Linked PipelineEvent ID")
    repo_full_name: str
    branch: str
    commit_sha: str
    root_cause: str
    proposed_fix: str
    fix_script: str
    # Guardian risk assessment snapshot taken when the request was created.
    risk_score: float
    risk_level: str
    risk_reasons: list[str] = Field(default_factory=list)
    estimated_duration_seconds: float = 0.0
    timing_level: str = "unknown"
    timing_reasons: list[str] = Field(default_factory=list)
    status: ApprovalStatus = ApprovalStatus.PENDING
    # Review audit trail — populated once a reviewer acts on the request.
    reviewer_note: Optional[str] = None
    reviewed_by: Optional[str] = None
    reviewed_at: Optional[datetime] = None
    expires_at: Optional[datetime] = None
    # NOTE(review): datetime.utcnow stores a naive UTC timestamp (deprecated in
    # Python 3.12) — serializers in the routes re-attach UTC; confirm before changing.
    created_at: datetime = Field(default_factory=datetime.utcnow)

    class Settings:
        name = "approval_requests"
diff --git a/backend/models/fix_record.py b/backend/models/fix_record.py
new file mode 100644
index 000000000..8b34105f9
--- /dev/null
+++ b/backend/models/fix_record.py
@@ -0,0 +1,29 @@
from beanie import Document
from pydantic import Field
from datetime import datetime
from typing import Optional, Dict, Any
from enum import Enum


class FixStatus(str, Enum):
    # Outcome of an executed fix.
    SUCCESS = "success"
    FAILED = "failed"
    PARTIAL = "partial"


class FixRecord(Document):
    """Audit record of one executed fix attempt (collection: fix_records)."""
    event_id: str
    repo_full_name: str
    fix_type: str  # dependency, config, patch, etc.
    fix_script: str
    fix_output: str = ""
    exit_code: int = 0
    status: FixStatus = FixStatus.SUCCESS
    duration_seconds: float = 0.0
    # False when a human approved the fix before execution.
    auto_applied: bool = True
    container_id: Optional[str] = None
    created_at: datetime = Field(default_factory=datetime.utcnow)
    metadata: Dict[str, Any] = Field(default_factory=dict)

    class Settings:
        name = "fix_records"
diff --git a/backend/models/pipeline_event.py b/backend/models/pipeline_event.py
new file mode 100644
index 000000000..e78a050ab
--- /dev/null
+++ b/backend/models/pipeline_event.py
@@ -0,0 +1,59 @@
from beanie import Document
from pydantic import Field
from datetime import datetime
from typing import Optional, List, Dict, Any
from enum import Enum


class PipelineStatus(str, Enum):
    # State machine for a failed-pipeline remediation attempt.
    FAILED = "failed"
    DIAGNOSING = "diagnosing"
    FIX_PENDING = "fix_pending"
    AWAITING_APPROVAL = "awaiting_approval"
    FIXING = "fixing"
    FIXED = "fixed"
    FAILED_TO_FIX = "failed_to_fix"
    RETRYING = "retrying"


class FailureCategory(str, Enum):
    # Coarse classification of what broke the pipeline.
    DEPENDENCY = "dependency_error"
    TEST_FAILURE = "test_failure"
    BUILD_ERROR = "build_error"
    CONFIG_ERROR = "config_error"
    NETWORK_ERROR = "network_error"
    PERMISSIONS = "permissions_error"
    UNKNOWN = "unknown"


class PipelineEvent(Document):
    """One CI failure event and its remediation state (collection: pipeline_events)."""
    event_id: str = Field(..., description="GitHub Actions run ID")
    repo_full_name: str = Field(..., description="owner/repo")
    repo_name: str
    branch: str
    commit_sha: str
    commit_message: str
    workflow_name: str
    job_name: Optional[str] = None
    status: PipelineStatus = PipelineStatus.FAILED
    failure_category: Optional[FailureCategory] = None
    # Diagnosis artifacts filled in by the agents as the event progresses.
    raw_logs: str = ""
    log_summary: str = ""
    root_cause: Optional[str] = None
    proposed_fix: Optional[str] = None
    fix_script: Optional[str] = None
    risk_score: Optional[float] = None
    risk_level: Optional[str] = None  # low / medium / high
    fix_applied: bool = False
    fix_output: Optional[str] = None
    re_run_triggered: bool = False
    re_run_success: Optional[bool] = None
    metadata: Dict[str, Any] = Field(default_factory=dict)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)

    class Settings:
        name = "pipeline_events"

    def update_timestamp(self):
        # Callers must still save() the document for this to persist.
        self.updated_at = datetime.utcnow()
diff --git a/backend/observability.py b/backend/observability.py
new file mode 100644
index 000000000..c18b6b4ef
--- /dev/null
+++ b/backend/observability.py
@@ -0,0 +1,100 @@
"""
OpenTelemetry bootstrap for exporting traces to SigNoz via OTLP.
"""
"""
OpenTelemetry bootstrap for exporting traces to SigNoz via OTLP.
"""
import logging
from typing import Dict

from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor
from opentelemetry.instrumentation.logging import LoggingInstrumentor
from opentelemetry.instrumentation.redis import RedisInstrumentor
from opentelemetry.sdk.resources import Resource, SERVICE_NAME
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

from backend.config import settings

logger = logging.getLogger(__name__)
# Module-level guard so setup_observability() is idempotent across calls.
_initialized = False


def _parse_resource_attributes(raw: str) -> Dict[str, str]:
    """Parse a comma-separated "key=value,key2=value2" string into a dict.

    Malformed items (no "=", empty key) are skipped silently; values may
    contain "=" since only the first one splits the pair.
    """
    attrs: Dict[str, str] = {}
    if not raw:
        return attrs

    for item in raw.split(","):
        pair = item.strip()
        if not pair or "=" not in pair:
            continue
        key, value = pair.split("=", 1)
        key = key.strip()
        value = value.strip()
        if key:
            attrs[key] = value
    return attrs


def _normalize_otlp_endpoint(endpoint: str) -> str:
    """Normalize endpoint for OTLP HTTP trace exporter.

    Expected form for SigNoz local collector is:
    http://127.0.0.1:4318/v1/traces

    Adds a default scheme (http) and the /v1/traces path when missing;
    an empty endpoint falls back to the local-collector default.
    """
    if not endpoint:
        return "http://127.0.0.1:4318/v1/traces"

    normalized = endpoint.strip().rstrip("/")

    if not normalized.startswith("http://") and not normalized.startswith("https://"):
        normalized = f"http://{normalized}"

    if not normalized.endswith("/v1/traces"):
        normalized = f"{normalized}/v1/traces"

    return normalized


def setup_observability(app) -> None:
    """Configure tracing and common instrumentations for FastAPI runtime."""
    global _initialized

    if _initialized:
        return

    if not settings.OTEL_ENABLED:
        logger.info("[Observability] OTEL disabled")
        _initialized = True
        return

    try:
        attrs = {
            SERVICE_NAME: settings.OTEL_SERVICE_NAME,
            "service.version": settings.APP_VERSION,
        }
        # Extra attributes from config override the defaults above on key clash.
        attrs.update(_parse_resource_attributes(settings.OTEL_RESOURCE_ATTRIBUTES))

        resource = Resource.create(attrs)
        tracer_provider = TracerProvider(resource=resource)

        exporter = OTLPSpanExporter(
            endpoint=_normalize_otlp_endpoint(settings.OTEL_EXPORTER_OTLP_ENDPOINT),
        )
        tracer_provider.add_span_processor(BatchSpanProcessor(exporter))
        trace.set_tracer_provider(tracer_provider)

        # Instrument the app plus outbound HTTP, Redis, and log correlation.
        FastAPIInstrumentor.instrument_app(app, tracer_provider=tracer_provider)
        HTTPXClientInstrumentor().instrument(tracer_provider=tracer_provider)
        RedisInstrumentor().instrument(tracer_provider=tracer_provider)
        LoggingInstrumentor().instrument(set_logging_format=False)

        logger.info(
            "[Observability] OTLP exporter configured for %s",
            settings.OTEL_EXPORTER_OTLP_ENDPOINT,
        )
        _initialized = True
    except Exception as exc:
        # Tracing is best-effort: never block app startup on exporter problems.
        logger.warning("[Observability] Failed to initialize tracing: %s", exc)
        _initialized = True
diff --git a/backend/requirements.txt b/backend/requirements.txt
new file mode 100644
index 000000000..f958d50b3
--- /dev/null
+++ b/backend/requirements.txt
@@ -0,0 +1,50 @@
# PipeGenie Backend Dependencies
fastapi==0.111.0
uvicorn[standard]==0.30.1
pydantic==2.7.4
pydantic-settings==2.3.4

# MongoDB
motor==3.6.0
beanie==1.26.0

# Redis
redis==5.0.6

# AI / LangChain
langchain==0.2.6
langchain-core==0.2.10
langchain-community==0.2.6
langchain-mistralai==0.1.8
google-genai==1.13.0

# Observability (SigNoz / OpenTelemetry)
opentelemetry-api==1.27.0
opentelemetry-sdk==1.27.0
opentelemetry-distro==0.48b0
opentelemetry-exporter-otlp==1.27.0
opentelemetry-instrumentation-fastapi==0.48b0
opentelemetry-instrumentation-httpx==0.48b0
opentelemetry-instrumentation-logging==0.48b0
opentelemetry-instrumentation-redis==0.48b0

# MilvusDB
pymilvus==2.4.0
setuptools<81
marshmallow<4

# GitHub & HTTP
httpx==0.28.1
PyGithub==2.3.0

# Docker
docker==7.1.0

# Utilities
python-multipart==0.0.9
python-jose[cryptography]==3.3.0
passlib[bcrypt]==1.7.4
python-dotenv==1.0.1

# Testing
pytest==8.3.3
diff --git a/backend/routes/__init__.py b/backend/routes/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/backend/routes/approvals.py b/backend/routes/approvals.py
new file mode 100644
index 000000000..11a8e402e
--- /dev/null
+++ b/backend/routes/approvals.py
@@ -0,0 +1,181 @@
"""
Approvals route – human-in-the-loop approval interface.
"""
"""
Approvals route – human-in-the-loop approval interface.
"""
import logging
from datetime import datetime, timezone
from typing import Optional
from fastapi import APIRouter, HTTPException, Request, Depends
from pydantic import BaseModel

from backend.models.approval_request import ApprovalRequest, ApprovalStatus
from backend.models.pipeline_event import PipelineEvent
from backend.guardian.risk_evaluator import RiskEvaluator

logger = logging.getLogger(__name__)
router = APIRouter(prefix="/approvals", tags=["approvals"])
_risk_evaluator = RiskEvaluator()
# Bumping this forces pending approvals scored by older rules to be re-scored.
RISK_EVALUATION_VERSION = 4


def get_orchestrator(request: Request):
    # Dependency: the orchestrator instance stored on app.state during startup.
    return request.app.state.orchestrator


class ApprovalAction(BaseModel):
    """Payload for approve/reject actions."""
    reviewer: str = "admin"
    note: Optional[str] = ""
    # Optional reviewer-edited script that replaces the proposed one on approval.
    edited_fix_script: Optional[str] = None


@router.get("/pending")
async def list_pending_approvals():
    """List all pending approval requests."""
    approvals = await ApprovalRequest.find(
        ApprovalRequest.status == ApprovalStatus.PENDING
    ).sort("-created_at").to_list()

    # Re-score with the latest rules so older pending approvals don't stay stale.
    for approval in approvals:
        await _refresh_pending_assessment(approval)

    return {
        "total": len(approvals),
        "approvals": [_serialize_approval(a) for a in approvals]
    }


@router.get("/{approval_id}")
async def get_approval(approval_id: str):
    """Get a specific approval request."""
    approval = await ApprovalRequest.get(approval_id)
    if not approval:
        raise HTTPException(404, "Approval not found")
    return _serialize_approval(approval)


@router.post("/{approval_id}/approve")
async def approve_fix(
    approval_id: str,
    action: ApprovalAction,
    orchestrator=Depends(get_orchestrator)
):
    """Approve a high-risk fix for execution."""
    try:
        await orchestrator.execute_approved_fix(
            approval_id=approval_id,
            reviewer=action.reviewer,
            note=action.note,
            edited_fix_script=action.edited_fix_script,
        )
        return {"status": "approved", "message": "Fix approved and executing..."}
    except ValueError as e:
        # ValueError from the orchestrator means a bad/expired approval id → 400.
        raise HTTPException(400, str(e))
    except Exception as e:
        logger.error(f"Approval execution error: {e}")
        raise HTTPException(500, f"Failed to execute fix: {str(e)}")


@router.post("/{approval_id}/reject")
async def reject_fix(
    approval_id: str,
    action: ApprovalAction,
    orchestrator=Depends(get_orchestrator)
):
    """Reject a fix request."""
    try:
        await orchestrator.reject_fix(
            approval_id=approval_id,
            reviewer=action.reviewer,
            note=action.note
        )
        return {"status": "rejected", "message": "Fix rejected successfully"}
    except ValueError as e:
        raise HTTPException(400, str(e))


@router.get("/history/all")
async def approval_history():
    """Get full approval history."""
    # Note: two path segments, so this does not collide with GET /{approval_id}.
    approvals = await ApprovalRequest.find_all().sort("-created_at").limit(50).to_list()
    return {"approvals": [_serialize_approval(a) for a in approvals]}


def _serialize_approval(a: ApprovalRequest) -> dict:
    """Convert an ApprovalRequest document to its JSON-safe API shape."""
    return {
        "id": str(a.id),
        "event_id": a.event_id,
        "repo_full_name": a.repo_full_name,
        "branch": a.branch,
        "commit_sha": a.commit_sha[:8] if a.commit_sha else "",
        "root_cause": a.root_cause,
        "proposed_fix": a.proposed_fix,
        "fix_script": a.fix_script,
        "risk_score": a.risk_score,
        "risk_level": a.risk_level,
        "risk_reasons": a.risk_reasons,
        "estimated_duration_seconds": a.estimated_duration_seconds,
        "timing_level": a.timing_level,
        "timing_reasons": a.timing_reasons,
        "status": a.status,
        "reviewer_note": a.reviewer_note,
        "reviewed_by": a.reviewed_by,
        "reviewed_at": _to_utc_iso(a.reviewed_at) if a.reviewed_at else None,
        "expires_at": _to_utc_iso(a.expires_at) if a.expires_at else None,
        "created_at": _to_utc_iso(a.created_at)
    }


def _to_utc_iso(value: datetime) -> str:
    """Render a datetime as a UTC ISO-8601 string with a trailing 'Z'.

    Naive datetimes are assumed to already be UTC (the models store
    datetime.utcnow()).
    """
    if value.tzinfo is None:
        value = value.replace(tzinfo=timezone.utc)
    return value.astimezone(timezone.utc).isoformat().replace("+00:00", "Z")


async def _refresh_pending_assessment(approval: ApprovalRequest) -> None:
    """Re-run the risk evaluator on a PENDING approval when its scoring looks stale.

    Staleness triggers: missing/implausibly-low score, missing timing data,
    score drift vs. the linked event's stored risk, or an older rule version.
    Persists updates to both the approval and the linked event.
    """
    if approval.status != ApprovalStatus.PENDING:
        return

    event = await PipelineEvent.get(approval.event_id)
    # Defensive reads: metadata may be absent or not shaped as expected.
    event_risk = event.metadata.get("risk", {}) if event and isinstance(event.metadata, dict) and isinstance(event.metadata.get("risk"), dict) else {}
    event_risk_version = int(event.metadata.get("risk_version", 0)) if event and isinstance(event.metadata, dict) else 0

    stale_risk = approval.risk_score is None or approval.risk_score <= 0.15
    stale_timing = (approval.estimated_duration_seconds or 0.0) <= 0.0 or approval.timing_level in ("", "unknown", None)
    score_mismatch = bool(event_risk) and abs(float(event_risk.get("score", approval.risk_score or 0.0)) - float(approval.risk_score or 0.0)) > 0.001
    should_refresh = stale_risk or stale_timing or score_mismatch or (event_risk_version < RISK_EVALUATION_VERSION)
    if not should_refresh:
        return

    fix_meta = event.metadata.get("fix", {}) if event and isinstance(event.metadata, dict) else {}
    diagnosis = event.metadata.get("diagnosis", {}) if event and isinstance(event.metadata, dict) else {}

    # Prefer the event's stored fix metadata; fall back to the approval's own copy.
    fix_payload = {
        "fix_type": fix_meta.get("fix_type", "manual"),
        "fix_description": fix_meta.get("fix_description") or approval.proposed_fix,
        "fix_script": fix_meta.get("fix_script") or approval.fix_script,
        "estimated_risk": fix_meta.get("estimated_risk", 0.3),
    }

    risk = _risk_evaluator.evaluate(
        fix=fix_payload,
        diagnosis=diagnosis if isinstance(diagnosis, dict) else {},
        repo=approval.repo_full_name,
        branch=approval.branch,
    )

    timing = risk.get("timing", {}) if isinstance(risk, dict) else {}
    approval.risk_score = risk.get("score", approval.risk_score)
    approval.risk_level = risk.get("level", approval.risk_level)
    approval.risk_reasons = risk.get("reasons", approval.risk_reasons)
    approval.estimated_duration_seconds = float(timing.get("estimated_seconds", 0.0) or 0.0)
    approval.timing_level = timing.get("level", "unknown")
    approval.timing_reasons = timing.get("reasons", [])
    await approval.save()

    if event:
        # Keep the linked event's risk fields and metadata in sync.
        event.risk_score = approval.risk_score
        event.risk_level = approval.risk_level
        event.metadata["risk"] = risk
        event.metadata["risk_version"] = RISK_EVALUATION_VERSION
        event.update_timestamp()
        await event.save()
diff --git a/backend/routes/dashboard.py b/backend/routes/dashboard.py
new file mode 100644
index 000000000..5e749307b
--- /dev/null
+++ b/backend/routes/dashboard.py
@@ -0,0 +1,242 @@
"""
Dashboard route – real-time stats and event listing.
"""
+""" +import logging +from datetime import datetime, timedelta, timezone +from typing import Optional +from fastapi import APIRouter, Query, WebSocket, WebSocketDisconnect, Request +from beanie.operators import In + +from backend.models.pipeline_event import PipelineEvent, PipelineStatus +from backend.models.approval_request import ApprovalRequest, ApprovalStatus +from backend.models.fix_record import FixRecord, FixStatus +from backend.guardian.risk_evaluator import RiskEvaluator + +logger = logging.getLogger(__name__) +router = APIRouter(prefix="/dashboard", tags=["dashboard"]) +_risk_evaluator = RiskEvaluator() +RISK_EVALUATION_VERSION = 4 + + +@router.get("/stats") +async def get_stats(): + """Overall system statistics for the dashboard.""" + total_events = await PipelineEvent.count() + fixed = await PipelineEvent.find(PipelineEvent.status == PipelineStatus.FIXED).count() + failed_to_fix = await PipelineEvent.find(PipelineEvent.status == PipelineStatus.FAILED_TO_FIX).count() + awaiting = await PipelineEvent.find(PipelineEvent.status == PipelineStatus.AWAITING_APPROVAL).count() + in_progress = await PipelineEvent.find( + In(PipelineEvent.status, [ + PipelineStatus.DIAGNOSING, PipelineStatus.FIX_PENDING, + PipelineStatus.FIXING, PipelineStatus.RETRYING + ]) + ).count() + + # Success rate + resolved = fixed + failed_to_fix + success_rate = (fixed / resolved * 100) if resolved > 0 else 0 + + # Fix records + total_fixes = await FixRecord.count() + successful_fixes = await FixRecord.find(FixRecord.status == FixStatus.SUCCESS).count() + auto_fixed = await FixRecord.find(FixRecord.auto_applied == True).count() + + # Pending approvals + pending_approvals = await ApprovalRequest.find( + ApprovalRequest.status == ApprovalStatus.PENDING + ).count() + + # Recent 24h stats + since = datetime.utcnow() - timedelta(hours=24) + recent_events = await PipelineEvent.find( + PipelineEvent.created_at >= since + ).count() + + return { + "total_events": total_events, + "fixed": fixed, + 
"failed_to_fix": failed_to_fix, + "awaiting_approval": awaiting, + "in_progress": in_progress, + "success_rate": round(success_rate, 1), + "total_fixes": total_fixes, + "successful_fixes": successful_fixes, + "auto_fixed": auto_fixed, + "pending_approvals": pending_approvals, + "events_last_24h": recent_events + } + + +@router.get("/events") +async def list_events( + page: int = Query(1, ge=1), + limit: int = Query(20, le=100), + status: Optional[str] = None, + repo: Optional[str] = None +): + """Paginated list of pipeline events.""" + query = PipelineEvent.find() + + if status: + try: + s = PipelineStatus(status) + query = PipelineEvent.find(PipelineEvent.status == s) + except ValueError: + pass + + if repo: + query = query.find(PipelineEvent.repo_full_name == repo) + + total = await PipelineEvent.count() + events = await query.sort("-created_at").skip((page - 1) * limit).limit(limit).to_list() + + for event in events: + await _refresh_event_assessment(event) + + return { + "total": total, + "page": page, + "limit": limit, + "events": [_serialize_event(e) for e in events] + } + + +@router.get("/events/{event_id}") +async def get_event(event_id: str): + """Get detailed info about a specific pipeline event.""" + # Try by MongoDB ID first + try: + event = await PipelineEvent.get(event_id) + except Exception: + event = None + + # Try by GitHub run ID + if not event: + event = await PipelineEvent.find_one(PipelineEvent.event_id == event_id) + + if not event: + from fastapi import HTTPException + raise HTTPException(404, "Event not found") + + await _refresh_event_assessment(event) + + return _serialize_event(event, include_logs=True) + + +@router.get("/repositories") +async def get_repositories(): + """List unique repositories with stats.""" + pipeline = [ + {"$group": {"_id": "$repo_full_name", "count": {"$sum": 1}, + "fixed": {"$sum": {"$cond": [{"$eq": ["$status", "fixed"]}, 1, 0]}}}}, + {"$sort": {"count": -1}}, + {"$limit": 20} + ] + try: + results = await 
PipelineEvent.aggregate(pipeline).to_list() + return {"repositories": [{"repo": r["_id"], "total": r["count"], "fixed": r["fixed"]} + for r in results]} + except Exception: + return {"repositories": []} + + +@router.websocket("/ws") +async def websocket_endpoint(websocket: WebSocket): + """WebSocket endpoint for real-time dashboard updates.""" + app = websocket.app + manager = app.state.ws_manager + await manager.connect(websocket) + try: + while True: + # Keep connection alive, send ping every 30s + data = await websocket.receive_text() + if data == "ping": + await websocket.send_text('{"type":"pong"}') + except WebSocketDisconnect: + manager.disconnect(websocket) + + +def _serialize_event(event: PipelineEvent, include_logs: bool = False) -> dict: + risk_meta = event.metadata.get("risk", {}) if isinstance(event.metadata, dict) else {} + timing_meta = risk_meta.get("timing", {}) if isinstance(risk_meta, dict) else {} + + data = { + "id": str(event.id), + "event_id": event.event_id, + "repo_full_name": event.repo_full_name, + "branch": event.branch, + "commit_sha": event.commit_sha[:8] if event.commit_sha else "", + "commit_message": event.commit_message[:100] if event.commit_message else "", + "workflow_name": event.workflow_name, + "status": event.status, + "failure_category": event.failure_category, + "root_cause": event.root_cause, + "proposed_fix": event.proposed_fix, + "risk_score": event.risk_score, + "risk_level": event.risk_level, + "estimated_duration_seconds": timing_meta.get("estimated_seconds"), + "timing_level": timing_meta.get("level"), + "fix_applied": event.fix_applied, + "re_run_triggered": event.re_run_triggered, + "re_run_success": event.re_run_success, + "created_at": _to_utc_iso(event.created_at), + "updated_at": _to_utc_iso(event.updated_at) + } + if include_logs: + data["raw_logs"] = event.raw_logs + data["log_summary"] = event.log_summary + data["fix_script"] = event.fix_script + data["fix_output"] = event.fix_output + data["metadata"] = 
event.metadata + return data + + +def _to_utc_iso(value: datetime) -> str: + if value.tzinfo is None: + value = value.replace(tzinfo=timezone.utc) + return value.astimezone(timezone.utc).isoformat().replace("+00:00", "Z") + + +async def _refresh_event_assessment(event: PipelineEvent) -> None: + if not isinstance(event.metadata, dict): + return + + fix_meta = event.metadata.get("fix", {}) + if not isinstance(fix_meta, dict): + fix_meta = {} + + if not (fix_meta.get("fix_script") or event.fix_script): + return + + risk_meta = event.metadata.get("risk", {}) if isinstance(event.metadata.get("risk"), dict) else {} + timing_meta = risk_meta.get("timing", {}) if isinstance(risk_meta, dict) else {} + has_timing = bool(timing_meta and timing_meta.get("estimated_seconds")) + risk_version = int(event.metadata.get("risk_version", 0)) + should_refresh = (risk_version < RISK_EVALUATION_VERSION) or (event.risk_score is None) or (not has_timing) + if not should_refresh: + return + + diagnosis = event.metadata.get("diagnosis", {}) + if not isinstance(diagnosis, dict): + diagnosis = {} + + fix_payload = { + "fix_type": fix_meta.get("fix_type", "manual"), + "fix_description": fix_meta.get("fix_description") or event.proposed_fix or "", + "fix_script": fix_meta.get("fix_script") or event.fix_script or "", + "estimated_risk": fix_meta.get("estimated_risk", 0.3), + } + + risk = _risk_evaluator.evaluate( + fix=fix_payload, + diagnosis=diagnosis, + repo=event.repo_full_name, + branch=event.branch, + ) + + event.risk_score = risk.get("score") + event.risk_level = risk.get("level") + event.metadata["risk"] = risk + event.metadata["risk_version"] = RISK_EVALUATION_VERSION + event.update_timestamp() + await event.save() diff --git a/backend/routes/webhook.py b/backend/routes/webhook.py new file mode 100644 index 000000000..f8267c572 --- /dev/null +++ b/backend/routes/webhook.py @@ -0,0 +1,321 @@ +""" +GitHub Webhook Route – receives and validates GitHub Actions events. 
+""" +import hashlib +import hmac +import logging +import json +import time +from datetime import datetime +from fastapi import APIRouter, Request, HTTPException, BackgroundTasks, Depends + +from backend.config import settings +from backend.models.pipeline_event import PipelineEvent, PipelineStatus +from backend.services.github_service import GitHubService + +logger = logging.getLogger(__name__) +router = APIRouter(prefix="/webhook", tags=["webhooks"]) + + +def get_orchestrator(request: Request): + return request.app.state.orchestrator + + +# Scenario builder conversation state (in production, use Redis or DB) +_builder_sessions = {} + +BUILDER_SYSTEM_PROMPT = """You are PipeGenie's Scenario Builder — an intelligent CI/CD failure assistant. +Your job is to understand what went wrong in the user's pipeline through a friendly conversation. + +Ask ONE clarifying question at a time. Be conversational and natural. +Focus on: failure type, service/component affected, error symptoms, environment, and any error keywords. 
+ +After gathering enough info (3-5 exchanges), provide JSON: +{ + "scenario_name": "Human-readable name", + "failure_category": "dependency_error|test_failure|build_error|config_error|network_error|permissions_error|unknown", + "repo": "repo-name", + "branch": "branch-name", + "commit_message": "what was the user trying to do", + "logs": "reconstructed error logs from conversation", + "ready_to_simulate": true +} + +Before that, just respond with conversational text asking the next question.""" + + +@router.post("/builder-chat") +async def scenario_builder_chat(request: Request, orchestrator=Depends(get_orchestrator)): + """Conversational scenario builder - ask clarifying questions to build a failure scenario.""" + body = await request.json() + session_id = body.get("session_id", "default") + user_message = body.get("message", "") + + if session_id not in _builder_sessions: + _builder_sessions[session_id] = {"history": [], "turn_count": 0} + + session = _builder_sessions[session_id] + session["history"].append({"role": "user", "content": user_message}) + session["turn_count"] += 1 + + try: + started = time.perf_counter() + provider = "unknown" + parsed_payload = None + + # Build compact conversation transcript for the LLM + conversation_lines = [] + for msg in session["history"][-8:]: + role = "User" if msg["role"] == "user" else "Assistant" + conversation_lines.append(f"{role}: {msg['content']}") + conversation_lines.append("Assistant:") + conversation_prompt = "\n".join(conversation_lines) + + if hasattr(orchestrator.diagnosis_agent, "invoke_prompt"): + provider = orchestrator.diagnosis_agent.get_provider_label() + assistant_message = orchestrator.diagnosis_agent.invoke_prompt( + BUILDER_SYSTEM_PROMPT, + conversation_prompt, + ) + else: + assistant_message = "I'm having trouble connecting. Could you describe what error you saw?" 
+ + if isinstance(assistant_message, str): + try: + parsed_payload = json.loads(assistant_message) + except json.JSONDecodeError: + parsed_payload = None + + if isinstance(parsed_payload, dict): + if "Assistant" in parsed_payload and isinstance(parsed_payload["Assistant"], str): + assistant_message = parsed_payload["Assistant"] + elif "message" in parsed_payload and isinstance(parsed_payload["message"], str): + assistant_message = parsed_payload["message"] + + elapsed_ms = int((time.perf_counter() - started) * 1000) + logger.info(f"[BuilderChat] provider={provider} elapsed_ms={elapsed_ms} session={session_id}") + + session["history"].append({"role": "assistant", "content": assistant_message}) + + # Check if we got a structured scenario response + try: + scenario_data = parsed_payload if isinstance(parsed_payload, dict) else json.loads(assistant_message) + if scenario_data.get("ready_to_simulate"): + return { + "message": assistant_message, + "scenario": scenario_data, + "ready": True, + "provider": provider, + "elapsed_ms": elapsed_ms, + } + except json.JSONDecodeError: + pass + + return { + "message": assistant_message, + "scenario": None, + "ready": False, + "provider": provider, + "elapsed_ms": elapsed_ms, + } + + except Exception as e: + logger.error(f"Builder chat failed: {e}") + return {"error": str(e), "message": "Failed to process message"} + + +def get_orchestrator(request: Request): + return request.app.state.orchestrator + + +def verify_signature(payload_body: bytes, signature_header: str) -> bool: + """Verify GitHub webhook HMAC-SHA256 signature.""" + if not signature_header: + return False + hash_object = hmac.new( + settings.GITHUB_WEBHOOK_SECRET.encode("utf-8"), + msg=payload_body, + digestmod=hashlib.sha256 + ) + expected = f"sha256={hash_object.hexdigest()}" + return hmac.compare_digest(expected, signature_header) + + +@router.post("/github") +async def github_webhook( + request: Request, + background_tasks: BackgroundTasks, + 
orchestrator=Depends(get_orchestrator) +): + """Receives GitHub Actions webhook events.""" + body = await request.body() + signature = request.headers.get("X-Hub-Signature-256", "") + + # Verify signature (skip in dev mode if secret matches default) + if settings.GITHUB_WEBHOOK_SECRET != "pipegenie-webhook-secret": + if not verify_signature(body, signature): + raise HTTPException(status_code=401, detail="Invalid webhook signature") + + event_type = request.headers.get("X-GitHub-Event", "") + payload = await request.json() + + logger.info(f"[Webhook] Received event: {event_type}") + + # Only process workflow_run failures + if event_type == "workflow_run": + action = payload.get("action") + workflow_run = payload.get("workflow_run", {}) + conclusion = workflow_run.get("conclusion") + + if action == "completed" and conclusion == "failure": + event = await _create_pipeline_event(payload, workflow_run) + background_tasks.add_task(orchestrator.process_failure, event) + logger.info(f"[Webhook] Queued processing for run {workflow_run.get('id')}") + return {"status": "accepted", "event_id": event.event_id} + + # Handle ping + if event_type == "ping": + return {"status": "pong", "message": "PipeGenie webhook active!"} + + return {"status": "ignored", "reason": f"Event type '{event_type}' not handled"} + + +async def _create_pipeline_event(payload: dict, workflow_run: dict) -> PipelineEvent: + """Create and persist a PipelineEvent from GitHub webhook payload.""" + repo = payload.get("repository", {}) + head_commit = workflow_run.get("head_commit", {}) + + event = PipelineEvent( + event_id=str(workflow_run.get("id", "")), + repo_full_name=repo.get("full_name", ""), + repo_name=repo.get("name", ""), + branch=workflow_run.get("head_branch", ""), + commit_sha=workflow_run.get("head_sha", ""), + commit_message=head_commit.get("message", ""), + workflow_name=workflow_run.get("name", ""), + status=PipelineStatus.FAILED, + raw_logs=_extract_logs_from_payload(payload), + metadata={ + 
"html_url": workflow_run.get("html_url", ""), + "actor": workflow_run.get("actor", {}).get("login", ""), + "run_attempt": workflow_run.get("run_attempt", 1), + "jobs_url": workflow_run.get("jobs_url", "") + } + ) + await event.insert() + return event + + +def _extract_logs_from_payload(payload: dict) -> str: + """Extract any available log data from the payload context.""" + wf = payload.get("workflow_run", {}) + return ( + f"Workflow: {wf.get('name', 'Unknown')}\n" + f"Branch: {wf.get('head_branch', '')}\n" + f"Conclusion: {wf.get('conclusion', 'failure')}\n" + f"Run URL: {wf.get('html_url', '')}\n" + f"Attempt: {wf.get('run_attempt', 1)}\n" + f"[Logs will be fetched from GitHub API]\n" + ) + + +@router.post("/preview-diagnosis") +async def preview_diagnosis(request: Request, orchestrator=Depends(get_orchestrator)): + """Preview diagnosis for simulation logs (agentic workflow).""" + body = await request.json() + + try: + diagnosis = await orchestrator.diagnosis_agent.analyze( + event_id="preview", + logs=body.get("logs", ""), + repo=body.get("repo", "demo-repo"), + branch=body.get("branch", "main"), + commit_message=body.get("commit_message", "") + ) + return {"diagnosis": diagnosis} + except Exception as e: + logger.error(f"Diagnosis preview failed: {e}") + return {"error": str(e), "diagnosis": None} + + +@router.post("/preview-fix") +async def preview_fix(request: Request, orchestrator=Depends(get_orchestrator)): + """Preview proposed fix before executing (agentic workflow).""" + body = await request.json() + + try: + # First diagnose + diagnosis = await orchestrator.diagnosis_agent.analyze( + event_id="preview-fix", + logs=body.get("logs", ""), + repo=body.get("repo", "demo-repo"), + branch=body.get("branch", "main"), + commit_message=body.get("commit_message", "") + ) + + # Then generate a fix based on diagnosis + fix = await orchestrator.fixer_agent.generate_fix( + diagnosis=diagnosis, + repo=body.get("repo", "demo-repo"), + branch=body.get("branch", "main"), 
+ raw_logs=body.get("logs", "") + ) + + # Evaluate risk + risk = orchestrator.risk_evaluator.evaluate( + fix=fix, + diagnosis=diagnosis, + repo=body.get("repo", "demo-repo"), + branch=body.get("branch", "main") + ) + + return { + "diagnosis": diagnosis, + "proposed_fix": fix, + "risk_assessment": risk + } + except Exception as e: + logger.error(f"Fix preview failed: {e}") + return {"error": str(e), "fix": None} + + +@router.post("/simulate") +async def simulate_failure( + request: Request, + background_tasks: BackgroundTasks, + orchestrator=Depends(get_orchestrator) +): + """Simulate a pipeline failure for testing (dev endpoint).""" + body = await request.json() + + event = PipelineEvent( + event_id=f"sim-{int(datetime.utcnow().timestamp())}", + repo_full_name=body.get("repo", "demo-org/demo-repo"), + repo_name=body.get("repo", "demo-repo").split("/")[-1], + branch=body.get("branch", "main"), + commit_sha=body.get("commit_sha", "abc1234"), + commit_message=body.get("commit_message", "feat: add new feature"), + workflow_name=body.get("workflow_name", "CI Pipeline"), + status=PipelineStatus.FAILED, + raw_logs=body.get("logs", DEFAULT_SAMPLE_LOGS), + metadata={"simulated": True} + ) + await event.insert() + background_tasks.add_task(orchestrator.process_failure, event) + + return {"status": "simulated", "event_id": event.event_id, "db_id": str(event.id)} + + +DEFAULT_SAMPLE_LOGS = """ +Run actions/setup-python@v4 + with: + python-version: 3.11 +Setting up Python 3.11.0 + +Run pip install -r requirements.txt +Collecting flask==2.3.0 + Downloading Flask-2.3.0-py3-none-any.whl (96 kB) +ERROR: Could not find a version that satisfies the requirement cryptography==41.0.0 (from versions: 39.0.0, 40.0.0) +ERROR: No matching distribution found for cryptography==41.0.0 +##[error]Process completed with exit code 1. 
+""" diff --git a/backend/services/__init__.py b/backend/services/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/backend/services/github_service.py b/backend/services/github_service.py new file mode 100644 index 000000000..7d7394a2c --- /dev/null +++ b/backend/services/github_service.py @@ -0,0 +1,138 @@ +""" +GitHub Service – interacts with GitHub API for log fetching and re-runs. +""" +import logging +import httpx +from typing import Optional, List + +from backend.config import settings + +logger = logging.getLogger(__name__) + + +class GitHubService: + BASE_URL = "https://api.github.com" + + def __init__(self): + self.token = settings.GITHUB_TOKEN + self.headers = { + "Authorization": f"Bearer {self.token}", + "Accept": "application/vnd.github+json", + "X-GitHub-Api-Version": "2022-11-28" + } + + async def get_workflow_logs(self, repo: str, run_id: str) -> str: + """Fetch logs for a specific workflow run.""" + if not self.token: + return "GitHub token not configured. Using webhook-provided logs." 
+ + url = f"{self.BASE_URL}/repos/{repo}/actions/runs/{run_id}/logs" + async with httpx.AsyncClient() as client: + try: + resp = await client.get(url, headers=self.headers, follow_redirects=True, timeout=30) + if resp.status_code == 200: + # Logs come as a ZIP file - return raw content for now + return resp.text[:8000] # Limit log size + else: + logger.warning(f"GitHub logs fetch failed: {resp.status_code}") + return f"Could not fetch logs: HTTP {resp.status_code}" + except Exception as e: + logger.error(f"GitHub API error: {e}") + return f"GitHub API error: {str(e)}" + + async def get_commit_info(self, repo: str, commit_sha: str) -> dict: + """Fetch commit details.""" + if not self.token: + return {} + url = f"{self.BASE_URL}/repos/{repo}/commits/{commit_sha}" + async with httpx.AsyncClient() as client: + try: + resp = await client.get(url, headers=self.headers, timeout=15) + if resp.status_code == 200: + data = resp.json() + return { + "sha": commit_sha, + "message": data.get("commit", {}).get("message", ""), + "author": data.get("commit", {}).get("author", {}).get("name", ""), + "files_changed": [f["filename"] for f in data.get("files", [])] + } + except Exception as e: + logger.error(f"Commit info error: {e}") + return {} + + async def trigger_rerun(self, repo: str, run_id: str) -> bool: + """Re-trigger a failed GitHub Actions workflow run.""" + if not self.token: + logger.warning("[GitHub] No token configured, skipping re-run") + return False + + url = f"{self.BASE_URL}/repos/{repo}/actions/runs/{run_id}/rerun-failed-jobs" + async with httpx.AsyncClient() as client: + try: + resp = await client.post(url, headers=self.headers, timeout=15) + success = resp.status_code in (201, 204) + logger.info(f"[GitHub] Re-run for {repo}#{run_id}: {'SUCCESS' if success else 'FAILED'}") + return success + except Exception as e: + logger.error(f"[GitHub] Re-run trigger error: {e}") + return False + + async def get_run_info(self, repo: str, run_id: str) -> dict: + """Get 
metadata about a workflow run.""" + if not self.token: + return {} + url = f"{self.BASE_URL}/repos/{repo}/actions/runs/{run_id}" + async with httpx.AsyncClient() as client: + try: + resp = await client.get(url, headers=self.headers, timeout=15) + if resp.status_code == 200: + data = resp.json() + return { + "workflow_name": data.get("name", ""), + "head_branch": data.get("head_branch", ""), + "head_sha": data.get("head_sha", ""), + "conclusion": data.get("conclusion", ""), + "html_url": data.get("html_url", "") + } + except Exception as e: + logger.error(f"[GitHub] Run info error: {e}") + return {} + + async def create_pull_request( + self, + repo: str, + head_branch: str, + base_branch: str, + title: str, + body: str + ) -> dict: + """Create a pull request and return metadata.""" + if not self.token: + logger.warning("[GitHub] No token configured, skipping PR creation") + return {} + + url = f"{self.BASE_URL}/repos/{repo}/pulls" + payload = { + "title": title, + "head": head_branch, + "base": base_branch, + "body": body, + "draft": False + } + + async with httpx.AsyncClient() as client: + try: + resp = await client.post(url, headers=self.headers, json=payload, timeout=20) + if resp.status_code in (200, 201): + data = resp.json() + return { + "number": data.get("number"), + "html_url": data.get("html_url", ""), + "state": data.get("state", "open") + } + + logger.warning(f"[GitHub] PR creation failed: {resp.status_code} {resp.text[:300]}") + return {} + except Exception as e: + logger.error(f"[GitHub] PR creation error: {e}") + return {} diff --git a/backend/services/websocket_manager.py b/backend/services/websocket_manager.py new file mode 100644 index 000000000..65f35683c --- /dev/null +++ b/backend/services/websocket_manager.py @@ -0,0 +1,46 @@ +""" +WebSocket Manager for real-time dashboard updates. 
+""" +import json +import logging +from typing import List, Dict, Any +from fastapi import WebSocket + +logger = logging.getLogger(__name__) + + +class WebSocketManager: + def __init__(self): + self.active_connections: List[WebSocket] = [] + + async def connect(self, websocket: WebSocket): + await websocket.accept() + self.active_connections.append(websocket) + logger.info(f"[WS] Client connected. Total: {len(self.active_connections)}") + + def disconnect(self, websocket: WebSocket): + if websocket in self.active_connections: + self.active_connections.remove(websocket) + logger.info(f"[WS] Client disconnected. Total: {len(self.active_connections)}") + + async def broadcast(self, data: Dict[str, Any]): + """Broadcast a message to all connected WebSocket clients.""" + message = json.dumps(data, default=str) + disconnected = [] + for ws in self.active_connections: + try: + await ws.send_text(message) + except Exception as e: + logger.warning(f"[WS] Failed to send to client: {e}") + disconnected.append(ws) + + for ws in disconnected: + self.disconnect(ws) + + async def send_personal(self, websocket: WebSocket, data: Dict[str, Any]): + """Send a message to a single client.""" + try: + await websocket.send_text(json.dumps(data, default=str)) + except Exception as e: + logger.warning(f"[WS] Failed to send personal message: {e}") + self.disconnect(websocket) diff --git a/backend/tests/test_risk_evaluator.py b/backend/tests/test_risk_evaluator.py new file mode 100644 index 000000000..a0bcc3086 --- /dev/null +++ b/backend/tests/test_risk_evaluator.py @@ -0,0 +1,44 @@ +try: + from backend.config import settings + from backend.guardian.risk_evaluator import RiskEvaluator +except ModuleNotFoundError: + from config import settings + from guardian.risk_evaluator import RiskEvaluator + + +def test_risk_evaluator_flags_destructive_script_as_high_risk(): + evaluator = RiskEvaluator() + + report = evaluator.evaluate( + fix={ + "fix_type": "manual", + "fix_script": "#!/bin/bash\nset 
-e\nrm -rf /\n", + "estimated_risk": 0.9, + }, + diagnosis={"failure_category": "unknown"}, + repo="demo/repo", + branch="main", + ) + + assert report["score"] > settings.RISK_HIGH_THRESHOLD + assert report["level"] == "high" + assert report["timing"]["estimated_seconds"] >= 5 + + +def test_risk_evaluator_returns_timing_metadata(): + evaluator = RiskEvaluator() + + report = evaluator.evaluate( + fix={ + "fix_type": "dependency", + "fix_script": "#!/bin/bash\nset -e\npip install -r requirements.txt\npytest -q\n", + "estimated_risk": 0.3, + }, + diagnosis={"failure_category": "dependency_error"}, + repo="demo/repo", + branch="develop", + ) + + assert "timing" in report + assert report["timing"]["estimated_seconds"] > 20 + assert report["timing"]["level"] in {"moderate", "slow"} diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 000000000..e5fbed10e --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,115 @@ +version: '3.8' + +services: + mongodb: + image: mongo:7.0 + container_name: pipegenie-mongo + restart: unless-stopped + ports: ["27017:27017"] + volumes: + - mongo_data:/data/db + environment: + MONGO_INITDB_DATABASE: pipegenie + + redis: + image: redis:7-alpine + container_name: pipegenie-redis + restart: unless-stopped + ports: ["6379:6379"] + + etcd: + image: quay.io/coreos/etcd:v3.5.5 + container_name: pipegenie-etcd + restart: unless-stopped + environment: + - ETCD_AUTO_COMPACTION_MODE=revision + - ETCD_AUTO_COMPACTION_RETENTION=1000 + - ETCD_QUOTA_BACKEND_BYTES=4294967296 + - ETCD_SNAPSHOT_COUNT=50000 + command: etcd -advertise-client-urls=http://etcd:2379 -listen-client-urls=http://0.0.0.0:2379 --data-dir /etcd + volumes: + - etcd_data:/etcd + + minio: + image: minio/minio:RELEASE.2024-01-16T16-07-38Z + container_name: pipegenie-minio + restart: unless-stopped + environment: + MINIO_ACCESS_KEY: minioadmin + MINIO_SECRET_KEY: minioadmin + command: minio server /minio_data + ports: + - "9001:9001" + - "9000:9000" + volumes: + - 
minio_data:/minio_data + + milvus: + image: milvusdb/milvus:v2.4.4 + container_name: pipegenie-milvus + restart: unless-stopped + command: ["milvus", "run", "standalone"] + environment: + ETCD_ENDPOINTS: etcd:2379 + MINIO_ADDRESS: minio:9000 + depends_on: [etcd, minio] + ports: + - "19530:19530" + - "9091:9091" + volumes: + - milvus_data:/var/lib/milvus + + backend: + build: + context: ./backend + dockerfile: Dockerfile + container_name: pipegenie-backend + restart: unless-stopped + ports: ["8000:8000"] + environment: + - MONGODB_URL=mongodb://mongodb:27017 + - REDIS_URL=redis://redis:6379 + - MILVUS_HOST=milvus + - MILVUS_PORT=19530 + - OLLAMA_BASE_URL=http://ollama:11434 + - USE_OLLAMA=true + - LLM_MODEL=mistral + - OTEL_ENABLED=true + - OTEL_SERVICE_NAME=pipegenie-backend + - OTEL_EXPORTER_OTLP_ENDPOINT=http://host.docker.internal:4318/v1/traces + - OTEL_EXPORTER_OTLP_INSECURE=true + - OTEL_RESOURCE_ATTRIBUTES=service.namespace=pipegenie,deployment.environment=docker + env_file: ./backend/.env + depends_on: [mongodb, redis, milvus] + + backend-tests: + build: + context: ./backend + dockerfile: Dockerfile.tests + container_name: pipegenie-backend-tests + profiles: ["test"] + command: ["python", "-m", "pytest", "-q", "backend/tests/test_risk_evaluator.py"] + + ollama: + image: ollama/ollama:latest + container_name: pipegenie-ollama + restart: unless-stopped + ports: ["11434:11434"] + volumes: + - ollama_data:/root/.ollama + + frontend: + build: + context: ./frontend + dockerfile: Dockerfile + container_name: pipegenie-frontend + restart: unless-stopped + ports: ["5173:80"] + depends_on: [backend] + +volumes: + mongo_data: + etcd_data: + minio_data: + milvus_data: + ollama_data: diff --git a/docker/fix_runner/Dockerfile b/docker/fix_runner/Dockerfile new file mode 100644 index 000000000..57a0fddd3 --- /dev/null +++ b/docker/fix_runner/Dockerfile @@ -0,0 +1,26 @@ +FROM python:3.11-slim + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + 
git \ + curl \ + wget \ + jq \ + sudo \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + +# Install common CI/CD tools +RUN pip install --no-cache-dir \ + pytest \ + poetry \ + black \ + flake8 \ + mypy \ + requests \ + httpx + +WORKDIR /workspace + +# Default entrypoint runs whatever script is passed +ENTRYPOINT ["/bin/bash"] diff --git a/frontend/index.html b/frontend/index.html new file mode 100644 index 000000000..2c75ce46e --- /dev/null +++ b/frontend/index.html @@ -0,0 +1,17 @@ + + + + + + + + PipeGenie – AI CI/CD Auto-Remediation + + + + + +
+ + + diff --git a/frontend/package-lock.json b/frontend/package-lock.json new file mode 100644 index 000000000..7f181d965 --- /dev/null +++ b/frontend/package-lock.json @@ -0,0 +1,2234 @@ +{ + "name": "pipegenie-frontend", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "pipegenie-frontend", + "version": "1.0.0", + "dependencies": { + "@radix-ui/react-slot": "^1.2.4", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "date-fns": "^3.6.0", + "lucide-react": "^0.400.0", + "react": "^18.3.1", + "react-dom": "^18.3.1", + "react-hot-toast": "^2.4.1", + "react-router-dom": "^6.24.1", + "recharts": "^2.12.7", + "tailwind-merge": "^3.5.0" + }, + "devDependencies": { + "@types/react": "^18.3.3", + "@types/react-dom": "^18.3.0", + "@vitejs/plugin-react": "^4.3.1", + "vite": "^5.3.4" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", 
+ "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.1", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz", + "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + 
"engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": 
"https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.29.2.tgz", + "integrity": "sha512-HoGuUs4sCZNezVEKdVcwqmZN8GoHirLUcLaYVNBK2J0DadGtdcqgr3BCbvH8+XUo4NGjNl3VOtSjEKNzqfFgKw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.2.tgz", + "integrity": "sha512-4GgRzy/+fsBa72/RZVJmGKPmZu9Byn8o4MoLpmNe1m8ZfYnz5emHLQz3U4gLud6Zwl0RZIcgiLD7Uq7ySFuDLA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + 
"peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.29.2.tgz", + "integrity": "sha512-JiDShH45zKHWyGe4ZNVRrCjBz8Nh9TMmZG1kh4QTK8hCBTWBi8Da+i7s1fJw7/lYpM4ccepSNfqzZ/QvABBi5g==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz", + "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + 
"resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" 
+ ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], 
+ "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": 
"sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": 
"https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + 
"node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@radix-ui/react-compose-refs": { + "version": 
"1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", + "integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.4.tgz", + "integrity": "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@remix-run/router": { + "version": "1.23.2", + "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.23.2.tgz", + "integrity": "sha512-Ic6m2U/rMjTkhERIa/0ZtXJP17QUi2CbWE7cqx4J58M8aA3QTfW+2UlQ4psvTX9IO1RfNVhK3pcpdjej7L+t2w==", + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-beta.27", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz", + "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.1.tgz", + "integrity": "sha512-d6FinEBLdIiK+1uACUttJKfgZREXrF0Qc2SmLII7W2AD8FfiZ9Wjd+rD/iRuf5s5dWrr1GgwXCvPqOuDquOowA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + 
"optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.1.tgz", + "integrity": "sha512-YjG/EwIDvvYI1YvYbHvDz/BYHtkY4ygUIXHnTdLhG+hKIQFBiosfWiACWortsKPKU/+dUwQQCKQM3qrDe8c9BA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.1.tgz", + "integrity": "sha512-mjCpF7GmkRtSJwon+Rq1N8+pI+8l7w5g9Z3vWj4T7abguC4Czwi3Yu/pFaLvA3TTeMVjnu3ctigusqWUfjZzvw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.1.tgz", + "integrity": "sha512-haZ7hJ1JT4e9hqkoT9R/19XW2QKqjfJVv+i5AGg57S+nLk9lQnJ1F/eZloRO3o9Scy9CM3wQ9l+dkXtcBgN5Ew==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.1.tgz", + "integrity": "sha512-czw90wpQq3ZsAVBlinZjAYTKduOjTywlG7fEeWKUA7oCmpA8xdTkxZZlwNJKWqILlq0wehoZcJYfBvOyhPTQ6w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.1.tgz", + "integrity": "sha512-KVB2rqsxTHuBtfOeySEyzEOB7ltlB/ux38iu2rBQzkjbwRVlkhAGIEDiiYnO2kFOkJp+Z7pUXKyrRRFuFUKt+g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", 
+ "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.1.tgz", + "integrity": "sha512-L+34Qqil+v5uC0zEubW7uByo78WOCIrBvci69E7sFASRl0X7b/MB6Cqd1lky/CtcSVTydWa2WZwFuWexjS5o6g==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.1.tgz", + "integrity": "sha512-n83O8rt4v34hgFzlkb1ycniJh7IR5RCIqt6mz1VRJD6pmhRi0CXdmfnLu9dIUS6buzh60IvACM842Ffb3xd6Gg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.1.tgz", + "integrity": "sha512-Nql7sTeAzhTAja3QXeAI48+/+GjBJ+QmAH13snn0AJSNL50JsDqotyudHyMbO2RbJkskbMbFJfIJKWA6R1LCJQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.1.tgz", + "integrity": "sha512-+pUymDhd0ys9GcKZPPWlFiZ67sTWV5UU6zOJat02M1+PiuSGDziyRuI/pPue3hoUwm2uGfxdL+trT6Z9rxnlMA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.1.tgz", + "integrity": 
"sha512-VSvgvQeIcsEvY4bKDHEDWcpW4Yw7BtlKG1GUT4FzBUlEKQK0rWHYBqQt6Fm2taXS+1bXvJT6kICu5ZwqKCnvlQ==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.1.tgz", + "integrity": "sha512-4LqhUomJqwe641gsPp6xLfhqWMbQV04KtPp7/dIp0nzPxAkNY1AbwL5W0MQpcalLYk07vaW9Kp1PBhdpZYYcEw==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.1.tgz", + "integrity": "sha512-tLQQ9aPvkBxOc/EUT6j3pyeMD6Hb8QF2BTBnCQWP/uu1lhc9AIrIjKnLYMEroIz/JvtGYgI9dF3AxHZNaEH0rw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.1.tgz", + "integrity": "sha512-RMxFhJwc9fSXP6PqmAz4cbv3kAyvD1etJFjTx4ONqFP9DkTkXsAMU4v3Vyc5BgzC+anz7nS/9tp4obsKfqkDHg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.1.tgz", + "integrity": "sha512-QKgFl+Yc1eEk6MmOBfRHYF6lTxiiiV3/z/BRrbSiW2I7AFTXoBFvdMEyglohPj//2mZS4hDOqeB0H1ACh3sBbg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.60.1", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.1.tgz", + "integrity": "sha512-RAjXjP/8c6ZtzatZcA1RaQr6O1TRhzC+adn8YZDnChliZHviqIjmvFwHcxi4JKPSDAt6Uhf/7vqcBzQJy0PDJg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.1.tgz", + "integrity": "sha512-wcuocpaOlaL1COBYiA89O6yfjlp3RwKDeTIA0hM7OpmhR1Bjo9j31G1uQVpDlTvwxGn2nQs65fBFL5UFd76FcQ==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.1.tgz", + "integrity": "sha512-77PpsFQUCOiZR9+LQEFg9GClyfkNXj1MP6wRnzYs0EeWbPcHs02AXu4xuUbM1zhwn3wqaizle3AEYg5aeoohhg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.1.tgz", + "integrity": "sha512-5cIATbk5vynAjqqmyBjlciMJl1+R/CwX9oLk/EyiFXDWd95KpHdrOJT//rnUl4cUcskrd0jCCw3wpZnhIHdD9w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.1.tgz", + "integrity": "sha512-cl0w09WsCi17mcmWqqglez9Gk8isgeWvoUZ3WiJFYSR3zjBQc2J5/ihSjpl+VLjPqjQ/1hJRcqBfLjssREQILw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + 
"version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.1.tgz", + "integrity": "sha512-4Cv23ZrONRbNtbZa37mLSueXUCtN7MXccChtKpUnQNgF010rjrjfHx3QxkS2PI7LqGT5xXyYs1a7LbzAwT0iCA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.1.tgz", + "integrity": "sha512-i1okWYkA4FJICtr7KpYzFpRTHgy5jdDbZiWfvny21iIKky5YExiDXP+zbXzm3dUcFpkEeYNHgQ5fuG236JPq0g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.1.tgz", + "integrity": "sha512-u09m3CuwLzShA0EYKMNiFgcjjzwqtUMLmuCJLeZWjjOYA3IT2Di09KaxGBTP9xVztWyIWjVdsB2E9goMjZvTQg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.1.tgz", + "integrity": "sha512-k+600V9Zl1CM7eZxJgMyTUzmrmhB/0XZnF4pRypKAlAgxmedUA+1v9R+XOFv56W4SlHEzfeMtzujLJD22Uz5zg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.1.tgz", + "integrity": "sha512-lWMnixq/QzxyhTV6NjQJ4SFo1J6PvOX8vUx5Wb4bBPsEb+8xZ89Bz6kOXpfXj9ak9AHTQVQzlgzBEc1SyM27xQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + 
"node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/d3-array": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz", + "integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==", + "license": "MIT" + }, + "node_modules/@types/d3-color": { + "version": "3.1.3", + "resolved": 
"https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", + "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==", + "license": "MIT" + }, + "node_modules/@types/d3-ease": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz", + "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==", + "license": "MIT" + }, + "node_modules/@types/d3-interpolate": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz", + "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==", + "license": "MIT", + "dependencies": { + "@types/d3-color": "*" + } + }, + "node_modules/@types/d3-path": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.1.tgz", + "integrity": "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==", + "license": "MIT" + }, + "node_modules/@types/d3-scale": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz", + "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==", + "license": "MIT", + "dependencies": { + "@types/d3-time": "*" + } + }, + "node_modules/@types/d3-shape": { + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.8.tgz", + "integrity": "sha512-lae0iWfcDeR7qt7rA88BNiqdvPS5pFVPpo5OfjElwNaT2yyekbM0C9vK+yqBqEmHr6lDkRnYNoTBYlAgJa7a4w==", + "license": "MIT", + "dependencies": { + "@types/d3-path": "*" + } + }, + "node_modules/@types/d3-time": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz", + "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==", 
+ "license": "MIT" + }, + "node_modules/@types/d3-timer": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz", + "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==", + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/prop-types": { + "version": "15.7.15", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.15.tgz", + "integrity": "sha512-F6bEyamV9jKGAFBEmlQnesRPGOQqS2+Uwi0Em15xenOxHaf2hv6L8YCVn3rPdPJOiJfPiCnLIRyvwVaqMY3MIw==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "18.3.28", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.28.tgz", + "integrity": "sha512-z9VXpC7MWrhfWipitjNdgCauoMLRdIILQsAEV+ZesIzBq/oUlxk0m3ApZuMFCXdnS4U7KrI+l3WRUEGQ8K1QKw==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "@types/prop-types": "*", + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.3.7", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.7.tgz", + "integrity": "sha512-MEe3UeoENYVFXzoXEWsvcpg6ZvlrFNlOQ7EOsvhI3CfAXwzPfO8Qwuxd40nepsYKqyyVQnTdEfv68q91yLcKrQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^18.0.0" + } + }, + "node_modules/@vitejs/plugin-react": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz", + "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.0", + 
"@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": "1.0.0-beta.27", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.17.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/baseline-browser-mapping": { + "version": "2.10.13", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.13.tgz", + "integrity": "sha512-BL2sTuHOdy0YT1lYieUxTw/QMtPBC3pmlJC6xk8BBYVv6vcw3SGdKemQ+Xsx9ik2F/lYDO9tqsFQH1r9PFuHKw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.cjs" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/browserslist": { + "version": "4.28.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.2.tgz", + "integrity": "sha512-48xSriZYYg+8qXna9kwqjIVzuQxi+KYWp2+5nCYnYKPTr0LvD89Jqk2Or5ogxz0NUMfIjhh2lIUX/LyX9B4oIg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.10.12", + "caniuse-lite": "^1.0.30001782", + "electron-to-chromium": "^1.5.328", + "node-releases": "^2.0.36", + "update-browserslist-db": "^1.2.3" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001784", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001784.tgz", + "integrity": "sha512-WU346nBTklUV9YfUl60fqRbU5ZqyXlqvo1SgigE1OAXK5bFL8LL9q1K7aap3N739l4BvNqnkm3YrGHiY9sfUQw==", + "dev": true, + "funding": [ + { + 
"type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/class-variance-authority": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.1.tgz", + "integrity": "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==", + "license": "Apache-2.0", + "dependencies": { + "clsx": "^2.1.1" + }, + "funding": { + "url": "https://polar.sh/cva" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "license": "MIT" + }, + "node_modules/d3-array": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz", + "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==", + "license": "ISC", + "dependencies": { + "internmap": "1 - 2" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-color": { + "version": "3.1.0", + "resolved": 
"https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-ease": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", + "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-format": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.2.tgz", + "integrity": "sha512-AJDdYOdnyRDV5b6ArilzCPPwc1ejkHcoyFarqlPqT7zRYjhavcT3uSrqcMvsgh2CgoPbK3RCwyHaVyxYcP2Arg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-interpolate": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-path": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz", + "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-scale": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz", + "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==", + "license": "ISC", + "dependencies": { + "d3-array": "2.10.0 - 3", + "d3-format": "1 - 3", + "d3-interpolate": "1.2.0 - 3", + "d3-time": "2.1.1 - 3", + "d3-time-format": "2 - 4" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-shape": { 
+ "version": "3.2.0", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz", + "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==", + "license": "ISC", + "dependencies": { + "d3-path": "^3.1.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz", + "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==", + "license": "ISC", + "dependencies": { + "d3-array": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time-format": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz", + "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==", + "license": "ISC", + "dependencies": { + "d3-time": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-timer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", + "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/date-fns": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-3.6.0.tgz", + "integrity": "sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/kossnocorp" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": 
{ + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decimal.js-light": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/decimal.js-light/-/decimal.js-light-2.5.1.tgz", + "integrity": "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==", + "license": "MIT" + }, + "node_modules/dom-helpers": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-5.2.1.tgz", + "integrity": "sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.8.7", + "csstype": "^3.0.2" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.331", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.331.tgz", + "integrity": "sha512-IbxXrsTlD3hRodkLnbxAPP4OuJYdWCeM3IOdT+CpcMoIwIoDfCmRpEtSPfwBXxVkg9xmBeY7Lz2Eo2TDn/HC3Q==", + "dev": true, + "license": "ISC" + }, + "node_modules/esbuild": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + 
"@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", + "license": "MIT" + }, + "node_modules/fast-equals": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/fast-equals/-/fast-equals-5.4.0.tgz", + "integrity": "sha512-jt2DW/aNFNwke7AUd+Z+e6pz39KO5rzdbbFCg2sGafS4mk13MI7Z8O5z9cADNn5lhGODIgLwug6TZO2ctf7kcw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/goober": { 
+      "version": "2.1.18",
+      "resolved": "https://registry.npmjs.org/goober/-/goober-2.1.18.tgz",
+      "integrity": "sha512-2vFqsaDVIT9Gz7N6kAL++pLpp41l3PfDuusHcjnGLfR6+huZkl6ziX+zgVC3ZxpqWhzH6pyDdGrCeDhMIvwaxw==",
+      "license": "MIT",
+      "peerDependencies": {
+        "csstype": "^3.0.10"
+      }
+    },
+    "node_modules/internmap": {
+      "version": "2.0.3",
+      "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz",
+      "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==",
+      "license": "ISC",
+      "engines": {
+        "node": ">=12"
+      }
+    },
+    "node_modules/js-tokens": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+      "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+      "license": "MIT"
+    },
+    "node_modules/jsesc": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz",
+      "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==",
+      "dev": true,
+      "license": "MIT",
+      "bin": {
+        "jsesc": "bin/jsesc"
+      },
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/json5": {
+      "version": "2.2.3",
+      "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+      "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
+      "dev": true,
+      "license": "MIT",
+      "bin": {
+        "json5": "lib/cli.js"
+      },
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/lodash": {
+      "version": "4.17.21",
+      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+      "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
+      "license": "MIT"
+    },
+    "node_modules/loose-envify": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+      "integrity":
"sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/lucide-react": { + "version": "0.400.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.400.0.tgz", + "integrity": "sha512-rpp7pFHh3Xd93KHixNgB0SqThMHpYNzsGUu69UaQbSZ75Q/J3m5t6EhKyMT3m4w2WOxmJ2mY0tD3vebnXqQryQ==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/node-releases": { + "version": "2.0.37", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.37.tgz", + "integrity": "sha512-1h5gKZCF+pO/o3Iqt5Jp7wc9rH3eJJ0+nh/CIoiRwjRxde/hAHyLPXYN4V3CqKAbiZPSeJFSWHmJsbkicta0Eg==", + "dev": true, + "license": "MIT" + }, + "node_modules/object-assign": { + "version": 
"4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/postcss": { + "version": "8.5.8", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.8.tgz", + "integrity": "sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/prop-types/node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "license": "MIT" + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": 
"https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/react-hot-toast": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/react-hot-toast/-/react-hot-toast-2.6.0.tgz", + "integrity": "sha512-bH+2EBMZ4sdyou/DPrfgIouFpcRLCJ+HoCA32UoAYHn6T3Ur5yfcDCeSr5mwldl6pFOsiocmrXMuoCJ1vV8bWg==", + "license": "MIT", + "dependencies": { + "csstype": "^3.1.3", + "goober": "^2.1.16" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "react": ">=16", + "react-dom": ">=16" + } + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "license": "MIT" + }, + "node_modules/react-refresh": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz", + "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-router": { + "version": "6.30.3", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.30.3.tgz", + "integrity": "sha512-XRnlbKMTmktBkjCLE8/XcZFlnHvr2Ltdr1eJX4idL55/9BbORzyZEaIkBFDhFGCEWBBItsVrDxwx3gnisMitdw==", + 
"license": "MIT", + "dependencies": { + "@remix-run/router": "1.23.2" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "react": ">=16.8" + } + }, + "node_modules/react-router-dom": { + "version": "6.30.3", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.30.3.tgz", + "integrity": "sha512-pxPcv1AczD4vso7G4Z3TKcvlxK7g7TNt3/FNGMhfqyntocvYKj+GCatfigGDjbLozC4baguJ0ReCigoDJXb0ag==", + "license": "MIT", + "dependencies": { + "@remix-run/router": "1.23.2", + "react-router": "6.30.3" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "react": ">=16.8", + "react-dom": ">=16.8" + } + }, + "node_modules/react-smooth": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/react-smooth/-/react-smooth-4.0.4.tgz", + "integrity": "sha512-gnGKTpYwqL0Iii09gHobNolvX4Kiq4PKx6eWBCYYix+8cdw+cGo3do906l1NBPKkSWx1DghC1dlWG9L2uGd61Q==", + "license": "MIT", + "dependencies": { + "fast-equals": "^5.0.1", + "prop-types": "^15.8.1", + "react-transition-group": "^4.4.5" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/react-transition-group": { + "version": "4.4.5", + "resolved": "https://registry.npmjs.org/react-transition-group/-/react-transition-group-4.4.5.tgz", + "integrity": "sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g==", + "license": "BSD-3-Clause", + "dependencies": { + "@babel/runtime": "^7.5.5", + "dom-helpers": "^5.0.1", + "loose-envify": "^1.4.0", + "prop-types": "^15.6.2" + }, + "peerDependencies": { + "react": ">=16.6.0", + "react-dom": ">=16.6.0" + } + }, + "node_modules/recharts": { + "version": "2.15.4", + "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.15.4.tgz", + "integrity": "sha512-UT/q6fwS3c1dHbXv2uFgYJ9BMFHu3fwnd7AYZaEQhXuYQ4hgsxLvsUXzGdKeZrW5xopzDCvuA2N41WJ88I7zIw==", + "license": "MIT", + "dependencies": { 
+ "clsx": "^2.0.0", + "eventemitter3": "^4.0.1", + "lodash": "^4.17.21", + "react-is": "^18.3.1", + "react-smooth": "^4.0.4", + "recharts-scale": "^0.4.4", + "tiny-invariant": "^1.3.1", + "victory-vendor": "^36.6.8" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "react": "^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/recharts-scale": { + "version": "0.4.5", + "resolved": "https://registry.npmjs.org/recharts-scale/-/recharts-scale-0.4.5.tgz", + "integrity": "sha512-kivNFO+0OcUNu7jQquLXAxz1FIwZj8nrj+YkOKc5694NbjCvcT6aSZiIzNzd2Kul4o4rTto8QVR9lMNtxD4G1w==", + "license": "MIT", + "dependencies": { + "decimal.js-light": "^2.4.1" + } + }, + "node_modules/rollup": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.1.tgz", + "integrity": "sha512-VmtB2rFU/GroZ4oL8+ZqXgSA38O6GR8KSIvWmEFv63pQ0G6KaBH9s07PO8XTXP4vI+3UJUEypOfjkGfmSBBR0w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.60.1", + "@rollup/rollup-android-arm64": "4.60.1", + "@rollup/rollup-darwin-arm64": "4.60.1", + "@rollup/rollup-darwin-x64": "4.60.1", + "@rollup/rollup-freebsd-arm64": "4.60.1", + "@rollup/rollup-freebsd-x64": "4.60.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.60.1", + "@rollup/rollup-linux-arm-musleabihf": "4.60.1", + "@rollup/rollup-linux-arm64-gnu": "4.60.1", + "@rollup/rollup-linux-arm64-musl": "4.60.1", + "@rollup/rollup-linux-loong64-gnu": "4.60.1", + "@rollup/rollup-linux-loong64-musl": "4.60.1", + "@rollup/rollup-linux-ppc64-gnu": "4.60.1", + "@rollup/rollup-linux-ppc64-musl": "4.60.1", + "@rollup/rollup-linux-riscv64-gnu": "4.60.1", + "@rollup/rollup-linux-riscv64-musl": "4.60.1", + "@rollup/rollup-linux-s390x-gnu": "4.60.1", + 
"@rollup/rollup-linux-x64-gnu": "4.60.1", + "@rollup/rollup-linux-x64-musl": "4.60.1", + "@rollup/rollup-openbsd-x64": "4.60.1", + "@rollup/rollup-openharmony-arm64": "4.60.1", + "@rollup/rollup-win32-arm64-msvc": "4.60.1", + "@rollup/rollup-win32-ia32-msvc": "4.60.1", + "@rollup/rollup-win32-x64-gnu": "4.60.1", + "@rollup/rollup-win32-x64-msvc": "4.60.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/tailwind-merge": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.5.0.tgz", + "integrity": "sha512-I8K9wewnVDkL1NTGoqWmVEIlUcB9gFriAEkXkfCjX5ib8ezGxtR3xD7iZIxrfArjEsH7F1CHD4RFUtxefdqV/A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, + "node_modules/tiny-invariant": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", + 
"license": "MIT" + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/victory-vendor": { + "version": "36.9.2", + "resolved": "https://registry.npmjs.org/victory-vendor/-/victory-vendor-36.9.2.tgz", + "integrity": "sha512-PnpQQMuxlwYdocC8fIJqVXvkeViHYzotI+NJrCuav0ZYFoq912ZHBk3mCeuj+5/VpodOjPe1z0Fk2ihgzlXqjQ==", + "license": "MIT AND ISC", + "dependencies": { + "@types/d3-array": "^3.0.3", + "@types/d3-ease": "^3.0.0", + "@types/d3-interpolate": "^3.0.1", + "@types/d3-scale": "^4.0.2", + "@types/d3-shape": "^3.1.0", + "@types/d3-time": "^3.0.0", + "@types/d3-timer": "^3.0.0", + "d3-array": "^3.1.6", + "d3-ease": "^3.0.1", + "d3-interpolate": "^3.0.1", + "d3-scale": "^4.0.2", + "d3-shape": "^3.1.0", + "d3-time": "^3.0.0", + "d3-timer": "^3.0.1" + } + }, + "node_modules/vite": { + "version": "5.4.21", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.21.tgz", + "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.21.3", + "postcss": "^8.4.43", + "rollup": "^4.20.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": 
"https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + } + } +} diff --git a/frontend/package.json b/frontend/package.json new file mode 100644 index 000000000..38440af86 --- /dev/null +++ b/frontend/package.json @@ -0,0 +1,30 @@ +{ + "name": "pipegenie-frontend", + "private": true, + "version": "1.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview" + }, + "dependencies": { + "@radix-ui/react-slot": "^1.2.4", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "date-fns": "^3.6.0", + "lucide-react": "^0.400.0", + "react": "^18.3.1", + "react-dom": "^18.3.1", + "react-hot-toast": "^2.4.1", + "react-router-dom": "^6.24.1", + "recharts": "^2.12.7", + "tailwind-merge": "^3.5.0" + }, + "devDependencies": { + "@types/react": "^18.3.3", + "@types/react-dom": "^18.3.0", + "@vitejs/plugin-react": "^4.3.1", + "vite": "^5.3.4" + } +} diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx new file mode 100644 index 000000000..f8bb79fa0 --- /dev/null +++ b/frontend/src/App.jsx @@ -0,0 +1,278 @@ +import React, { useEffect, useRef, useState } 
from 'react' +import { BrowserRouter, NavLink, Route, Routes } from 'react-router-dom' +import { + Activity, + BookOpen, + Circle, + FlaskConical, + History as HistoryIcon, + LayoutDashboard, + Moon, + Sun, + ShieldAlert, + Wifi, + WifiOff, +} from 'lucide-react' +import Dashboard from './pages/Dashboard.jsx' +import Approvals from './pages/Approvals.jsx' +import History from './pages/History.jsx' +import Simulate from './pages/Simulate.jsx' +import EventDetail from './pages/EventDetail.jsx' + +const WS_URL = 'ws://localhost:8000/api/dashboard/ws' +const BACKEND_BASE_URL = import.meta.env.VITE_BACKEND_URL || 'http://localhost:8000' + +export const WSContext = React.createContext(null) + +function Sidebar({ pendingCount, liveCount, wsConnected, theme, onToggleTheme, backendInfo }) { + const navItems = [ + { path: '/', label: 'Dashboard', icon: LayoutDashboard, exact: true }, + { path: '/approvals', label: 'Approvals', icon: ShieldAlert, badge: pendingCount }, + { path: '/history', label: 'History', icon: HistoryIcon }, + { path: '/simulate', label: 'Simulate', icon: FlaskConical }, + ] + + return ( + + ) +} + +function OpsToolbar({ backendInfo, wsConnected }) { + return ( +
+
+

Operations Command Surface

+

Live reliability telemetry, guarded remediation workflows, and incident audit trails.

+
+
+
+ + Backend {backendInfo.healthy ? 'healthy' : 'unavailable'} +
+
WS {wsConnected ? 'connected' : 'reconnecting'}
+ + API root + + + + Docs + +
+
+ ) +} + +export default function App() { + const [wsMessages, setWsMessages] = useState([]) + const [wsConnected, setWsConnected] = useState(false) + const [pendingCount, setPendingCount] = useState(0) + const [liveCount, setLiveCount] = useState(0) + const [backendInfo, setBackendInfo] = useState({ + healthy: false, + name: 'PipeGenie', + version: 'unknown', + }) + const wsRef = useRef(null) + const pingIntervalRef = useRef(null) + const [theme, setTheme] = useState(() => { + const storedTheme = localStorage.getItem('pg-theme') + if (storedTheme === 'light' || storedTheme === 'dark') return storedTheme + return window.matchMedia('(prefers-color-scheme: dark)').matches ? 'dark' : 'light' + }) + + useEffect(() => { + document.documentElement.setAttribute('data-theme', theme) + localStorage.setItem('pg-theme', theme) + }, [theme]) + + useEffect(() => { + connectWS() + fetchPendingCount() + fetchBackendInfo() + + const healthInterval = setInterval(fetchBackendInfo, 30000) + + return () => { + if (pingIntervalRef.current) { + clearInterval(pingIntervalRef.current) + } + clearInterval(healthInterval) + wsRef.current?.close() + } + }, []) + + function connectWS() { + try { + const ws = new WebSocket(WS_URL) + wsRef.current = ws + + ws.onopen = () => { + setWsConnected(true) + + if (pingIntervalRef.current) { + clearInterval(pingIntervalRef.current) + } + + pingIntervalRef.current = setInterval(() => { + if (ws.readyState === 1) ws.send('ping') + }, 25000) + } + + ws.onmessage = (e) => { + try { + const msg = JSON.parse(e.data) + if (msg.type === 'pong') return + + setWsMessages((prev) => [msg, ...prev.slice(0, 99)]) + + if (['diagnosing', 'fixing', 'retrying'].includes(msg.status)) { + setLiveCount((count) => count + 1) + } else { + setLiveCount((count) => Math.max(0, count - 1)) + } + + if (msg.type === 'approval_required') { + setPendingCount((count) => count + 1) + } + + if (msg.type === 'fix_rejected' || msg.type === 'fix_complete') { + setPendingCount((count) => 
Math.max(0, count - 1)) + } + } catch (_) { + return + } + } + + ws.onclose = () => { + setWsConnected(false) + if (pingIntervalRef.current) { + clearInterval(pingIntervalRef.current) + pingIntervalRef.current = null + } + setTimeout(connectWS, 3000) + } + + ws.onerror = () => ws.close() + } catch (_) { + return + } + } + + async function fetchPendingCount() { + try { + const response = await fetch('/api/approvals/pending') + const data = await response.json() + setPendingCount(data.total || 0) + } catch (_) { + return + } + } + + async function fetchBackendInfo() { + try { + const [rootRes, healthRes] = await Promise.all([ + fetch(`${BACKEND_BASE_URL}/`), + fetch(`${BACKEND_BASE_URL}/health`), + ]) + + const root = rootRes.ok ? await rootRes.json() : null + const health = healthRes.ok ? await healthRes.json() : null + + setBackendInfo({ + healthy: health?.status === 'healthy', + name: root?.name || 'PipeGenie', + version: root?.version || 'unknown', + }) + } catch (_) { + setBackendInfo((prev) => ({ ...prev, healthy: false })) + } + } + + return ( + + +
+ setTheme((prev) => (prev === 'dark' ? 'light' : 'dark'))} + backendInfo={backendInfo} + /> +
+ + + } /> + } /> + } /> + } /> + } /> + +
+
+
+
+ ) +} diff --git a/frontend/src/components/ui/badge.jsx b/frontend/src/components/ui/badge.jsx new file mode 100644 index 000000000..da329edd3 --- /dev/null +++ b/frontend/src/components/ui/badge.jsx @@ -0,0 +1,25 @@ +import * as React from 'react' +import { cva } from 'class-variance-authority' +import { cn } from '../../lib/utils' + +const badgeVariants = cva('ui-badge', { + variants: { + variant: { + default: 'ui-badge-default', + secondary: 'ui-badge-secondary', + outline: 'ui-badge-outline', + success: 'ui-badge-success', + warning: 'ui-badge-warning', + danger: 'ui-badge-danger', + }, + }, + defaultVariants: { + variant: 'default', + }, +}) + +function Badge({ className, variant, ...props }) { + return
+} + +export { Badge, badgeVariants } diff --git a/frontend/src/components/ui/button.jsx b/frontend/src/components/ui/button.jsx new file mode 100644 index 000000000..6343f25d3 --- /dev/null +++ b/frontend/src/components/ui/button.jsx @@ -0,0 +1,40 @@ +import * as React from 'react' +import { Slot } from '@radix-ui/react-slot' +import { cva } from 'class-variance-authority' +import { cn } from '../../lib/utils' + +const buttonVariants = cva('ui-button', { + variants: { + variant: { + default: 'ui-button-default', + outline: 'ui-button-outline', + ghost: 'ui-button-ghost', + success: 'ui-button-success', + danger: 'ui-button-danger', + }, + size: { + default: 'ui-button-md', + sm: 'ui-button-sm', + lg: 'ui-button-lg', + icon: 'ui-button-icon', + }, + }, + defaultVariants: { + variant: 'default', + size: 'default', + }, +}) + +const Button = React.forwardRef(({ className, variant, size, asChild = false, ...props }, ref) => { + const Comp = asChild ? Slot : 'button' + return ( + + ) +}) +Button.displayName = 'Button' + +export { Button, buttonVariants } diff --git a/frontend/src/components/ui/card.jsx b/frontend/src/components/ui/card.jsx new file mode 100644 index 000000000..d276cdc93 --- /dev/null +++ b/frontend/src/components/ui/card.jsx @@ -0,0 +1,34 @@ +import * as React from 'react' +import { cn } from '../../lib/utils' + +const Card = React.forwardRef(({ className, ...props }, ref) => ( +
+)) +Card.displayName = 'Card' + +const CardHeader = React.forwardRef(({ className, ...props }, ref) => ( +
+)) +CardHeader.displayName = 'CardHeader' + +const CardTitle = React.forwardRef(({ className, ...props }, ref) => ( +

+)) +CardTitle.displayName = 'CardTitle' + +const CardDescription = React.forwardRef(({ className, ...props }, ref) => ( +

+)) +CardDescription.displayName = 'CardDescription' + +const CardContent = React.forwardRef(({ className, ...props }, ref) => ( +

+)) +CardContent.displayName = 'CardContent' + +const CardFooter = React.forwardRef(({ className, ...props }, ref) => ( +
+)) +CardFooter.displayName = 'CardFooter' + +export { Card, CardContent, CardDescription, CardFooter, CardHeader, CardTitle } diff --git a/frontend/src/components/ui/progress.jsx b/frontend/src/components/ui/progress.jsx new file mode 100644 index 000000000..d8c3c5706 --- /dev/null +++ b/frontend/src/components/ui/progress.jsx @@ -0,0 +1,15 @@ +import * as React from 'react' +import { cn } from '../../lib/utils' + +const Progress = React.forwardRef(({ className, value = 0, ...props }, ref) => { + const bounded = Math.min(100, Math.max(0, value)) + + return ( +
+
+
+ ) +}) +Progress.displayName = 'Progress' + +export { Progress } diff --git a/frontend/src/components/ui/separator.jsx b/frontend/src/components/ui/separator.jsx new file mode 100644 index 000000000..631e72691 --- /dev/null +++ b/frontend/src/components/ui/separator.jsx @@ -0,0 +1,19 @@ +import * as React from 'react' +import { cn } from '../../lib/utils' + +const Separator = React.forwardRef(({ className, orientation = 'horizontal', ...props }, ref) => ( +
+)) +Separator.displayName = 'Separator' + +export { Separator } diff --git a/frontend/src/index.css b/frontend/src/index.css new file mode 100644 index 000000000..d64fba068 --- /dev/null +++ b/frontend/src/index.css @@ -0,0 +1,1673 @@ +@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700;800&family=Poppins:wght@500;600;700;800&family=JetBrains+Mono:wght@400;500&display=swap'); + +:root { + --bg: #f5f5f5; + --panel: #ffffff; + --panel-muted: #fafafa; + --panel-alt: #f0f0f0; + --line: #e0e0e0; + --line-strong: #cccccc; + --text: #1a1a1a; + --text-soft: #666666; + --text-faint: #999999; + + --tone-900: #1a1a1a; + --tone-800: #333333; + --tone-700: #4d4d4d; + --tone-600: #666666; + --tone-500: #808080; + --tone-300: #cccccc; + + --ok: #7cc6a3; + --warn: #d7bc78; + --danger: #de9898; + --accent: #61afef; + --accent-soft: #e8f4ff; + --product-rail: #999999; + --brand-a: #61afef; + --brand-b: #61afef; + --brand-c: #1a1a1a; + --font-mono: 'JetBrains Mono', 'SFMono-Regular', Menlo, Monaco, Consolas, monospace; + + --radius-sm: 6px; + --radius-md: 8px; + --radius-lg: 10px; +} + +[data-theme='dark'] { + --bg: #000000; + --panel: #0a0a0a; + --panel-muted: #131313; + --panel-alt: #1a1a1a; + --line: #262626; + --line-strong: #333333; + --text: #ffffff; + --text-soft: #b3b3b3; + --text-faint: #808080; + + --tone-900: #f5f5f5; + --tone-800: #e0e0e0; + --tone-700: #cccccc; + --tone-600: #b3b3b3; + --tone-500: #999999; + --tone-300: #666666; + + --ok: #7cc6a3; + --warn: #d7bc78; + --danger: #de9898; + --accent: #61afef; + --accent-soft: #1a1a1a; + --product-rail: #999999; + --brand-a: #61afef; + --brand-b: #61afef; + --brand-c: #ffffff; +} + +* { + box-sizing: border-box; +} + +body { + margin: 0; + font-family: 'Inter', 'Segoe UI', 'Helvetica Neue', Arial, sans-serif; + background: var(--bg); + color: var(--text); + letter-spacing: -0.005em; +} + +h1, +h2, +h3, +h4, +h5, +h6 { + font-family: 'Poppins', 'Inter', 'Segoe UI', 'Helvetica Neue', Arial, 
sans-serif; +} + +code, +pre, +.ui-code { + font-family: var(--font-mono); +} + +/* Terminal-like surfaces */ +textarea.ui-code, +.approvals-code, +.ui-code { + font-family: var(--font-mono); +} + +.app-shell { + display: grid; + grid-template-columns: 260px minmax(0, 1fr); + min-height: 100vh; + position: relative; + isolation: isolate; +} + +.app-sidebar { + position: sticky; + top: 0; + height: 100vh; + padding: 14px 12px; + border-right: 1px solid var(--line); + background: var(--panel); + display: flex; + flex-direction: column; + gap: 14px; + z-index: 2; +} + +.sidebar-brand { + display: flex; + align-items: center; + gap: 10px; + padding: 6px 6px 8px; + border-bottom: 1px solid color-mix(in srgb, var(--line) 88%, transparent); +} + +.sidebar-brand-mark { + width: 34px; + height: 34px; + border-radius: 6px; + background: var(--accent); + color: var(--bg); + display: grid; + place-items: center; + border: 1px solid var(--accent); +} + +.sidebar-brand-title { + margin: 0; + font-weight: 800; + letter-spacing: -0.02em; + font-size: 15px; + color: color-mix(in srgb, var(--brand-c) 76%, var(--text)); +} + +.sidebar-brand-subtitle { + margin: 2px 0 0; + color: var(--text-faint); + font-size: 12px; +} + +.sidebar-brand-tag { + margin-top: 4px; + display: inline-flex; + font-size: 10px; + font-weight: 700; + letter-spacing: 0.06em; + text-transform: uppercase; + color: color-mix(in srgb, var(--brand-a) 72%, var(--brand-b)); + border: 1px solid color-mix(in srgb, var(--brand-a) 35%, var(--line)); + border-radius: 999px; + padding: 2px 7px; + background: color-mix(in srgb, var(--brand-a) 6%, var(--panel)); +} + +.sidebar-live-status { + display: flex; + align-items: center; + gap: 8px; + border: 1px solid var(--line); + border-radius: var(--radius-sm); + background: var(--panel); + padding: 6px 8px; + color: var(--text-soft); + font-size: 11px; + justify-content: space-between; + border-style: solid; +} + +.theme-toggle { + margin-left: auto; + border: 1px solid 
var(--line-strong); + background: var(--panel-alt); + color: var(--text-soft); + border-radius: 999px; + height: 26px; + padding: 0 9px; + display: inline-flex; + align-items: center; + gap: 5px; + font-size: 11px; + cursor: pointer; +} + +.theme-toggle:hover { + color: var(--text); + background: var(--panel); +} + +.sidebar-nav-list { + display: flex; + flex-direction: column; + gap: 4px; +} + +.sidebar-nav-item { + text-decoration: none; + color: var(--text-soft); + display: flex; + align-items: center; + gap: 10px; + border-radius: 4px; + padding: 7px 8px; + border: 1px solid transparent; + transition: background-color 140ms ease, border-color 140ms ease, color 140ms ease; +} + +.sidebar-nav-item:hover { + background: var(--panel); + border-color: var(--line); +} + +.sidebar-nav-item.active { + color: var(--text); + background: var(--panel); + border-color: var(--line-strong); + border-left: 3px solid var(--accent); +} + +.sidebar-nav-icon { + width: 24px; + height: 24px; + border-radius: 4px; + background: var(--panel-muted); + display: grid; + place-items: center; + border: 1px solid color-mix(in srgb, var(--line) 85%, transparent); +} + +.sidebar-nav-badge { + margin-left: auto; + font-size: 11px; + font-weight: 700; + border: 1px solid var(--line-strong); + border-radius: 999px; + padding: 2px 8px; +} + +.sidebar-footer-stats { + margin-top: auto; + border: 1px solid var(--line); + border-radius: 6px; + background: var(--panel); + padding: 7px; + display: grid; + gap: 6px; +} + +.sidebar-footer-stats p { + margin: 0; + font-size: 11px; + color: var(--text-faint); +} + +.sidebar-footer-stats strong { + font-size: 18px; + letter-spacing: -0.03em; +} + +.sidebar-backend-status { + border-top: 1px dashed var(--line); + padding-top: 8px; +} + +.sidebar-backend-status > div { + display: inline-flex; + align-items: center; + gap: 6px; + margin-top: 4px; +} + +.sidebar-backend-dot { + width: 8px; + height: 8px; + border-radius: 999px; + display: inline-block; +} + 
+.sidebar-backend-dot.ok { + background: var(--ok); +} + +.sidebar-backend-dot.warn { + background: var(--danger); +} + +.sidebar-backend-status small { + display: block; + margin-top: 4px; + font-size: 11px; + color: var(--text-faint); +} + +.app-main { + padding: 14px; + position: relative; + z-index: 1; +} + +.ops-toolbar { + border: 1px solid var(--line); + background: var(--panel-alt); + border-radius: 6px; + padding: 8px 10px; + margin-bottom: 10px; + display: flex; + justify-content: space-between; + gap: 10px; + align-items: center; + flex-wrap: wrap; +} + +.ops-toolbar-title h2 { + margin: 0; + font-size: 14px; + letter-spacing: -0.02em; +} + +.ops-toolbar-title p { + margin: 4px 0 0; + color: var(--text-soft); + font-size: 11px; +} + +.ops-toolbar-meta { + display: flex; + gap: 8px; + align-items: center; + flex-wrap: wrap; +} + +.ops-chip { + border: 1px solid var(--line-strong); + border-radius: 999px; + padding: 3px 7px; + font-size: 11px; + display: inline-flex; + align-items: center; + gap: 6px; + color: var(--text-soft); + background: var(--panel-muted); +} + +.ops-chip-dot-ok { + color: var(--ok); + fill: currentColor; +} + +.ops-chip-dot-warn { + color: var(--danger); + fill: currentColor; +} + +.ops-link { + border: 1px solid var(--line-strong); + border-radius: 999px; + text-decoration: none; + color: var(--text); + font-size: 11px; + font-weight: 600; + padding: 3px 7px; + display: inline-flex; + align-items: center; + gap: 6px; +} + +.ops-link:hover { + background: var(--panel-alt); +} + +.page-stack { + display: grid; + gap: 10px; +} + +.page-header-block { + display: flex; + justify-content: space-between; + gap: 8px; + align-items: flex-start; + flex-wrap: wrap; +} + +.page-header-block h1 { + margin: 0; + font-size: clamp(22px, 2.1vw, 28px); + letter-spacing: -0.04em; + line-height: 1.05; +} + +.page-header-block p { + margin: 5px 0 0; + color: var(--text-soft); + font-size: 13px; +} + +.page-header-badges { + display: flex; + gap: 8px; + 
flex-wrap: wrap; +} + +.page-tab-row { + display: flex; + gap: 6px; + flex-wrap: wrap; +} + +.page-loader, +.empty-block, +.table-empty { + color: var(--text-soft); + text-align: center; + padding: 26px; +} + +.page-pagination { + display: flex; + align-items: center; + justify-content: center; + gap: 10px; +} + +.ui-card { + background: var(--panel); + border: 1px solid var(--line); + border-radius: 6px; + position: relative; +} + +.ui-card-header { + padding: 9px 10px 8px; + border-bottom: 1px solid color-mix(in srgb, var(--line) 80%, transparent); +} + +.ui-card-title { + margin: 0; + font-size: 14px; + display: flex; + align-items: center; + gap: 8px; + letter-spacing: -0.01em; +} + +.ui-card-description { + margin: 8px 0 0; + color: var(--text-soft); + font-size: 12px; +} + +.ui-card-content { + padding: 9px 10px 10px; +} + +.ui-card-footer { + padding: 0 18px 18px; +} + +.ui-button { + border-radius: 4px; + border: 1px solid var(--line); + background: var(--panel); + color: var(--text); + display: inline-flex; + align-items: center; + justify-content: center; + gap: 7px; + font-weight: 600; + cursor: pointer; + transition: transform 120ms ease, background-color 140ms ease, border-color 140ms ease, color 140ms ease; + text-transform: none; +} + +.ui-button:hover { + transform: translateY(-1px); +} + +.ui-button:active { + transform: translateY(0); +} + +.ui-button:disabled { + opacity: 0.55; + cursor: not-allowed; +} + +.ui-button-md { padding: 8px 11px; font-size: 12px; } +.ui-button-sm { padding: 6px 8px; font-size: 11px; } +.ui-button-lg { padding: 12px 16px; font-size: 14px; } +.ui-button-icon { width: 36px; height: 36px; } + +.ui-button-default { + background: var(--tone-900); + border-color: var(--tone-900); + color: var(--panel); + letter-spacing: 0.01em; +} + +.ui-button-outline, +.ui-button-ghost { + background: var(--panel); +} + +.ui-button-ghost:hover, +.ui-button-outline:hover { + background: var(--panel-alt); +} + +.ui-button-success { + 
background: #edf7f2; + color: var(--ok); + border-color: #c7e2d6; +} + +.ui-button-danger { + background: #faf0f0; + color: var(--danger); + border-color: #e5c9c9; +} + +.ui-badge { + border-radius: 4px; + border: 1px solid var(--line); + padding: 2px 7px; + font-size: 11px; + display: inline-flex; + align-items: center; + gap: 5px; + white-space: nowrap; + font-weight: 600; + letter-spacing: 0.01em; +} + +.ui-badge-default, +.ui-badge-secondary { background: var(--panel-muted); color: var(--text-soft); } +.ui-badge-outline { background: var(--panel); } +.ui-badge-success { background: #e1f5f0; color: #1a4d3f; border-color: #b3d4cc; } +.ui-badge-warning { background: #fef3e0; color: #5c3d1f; border-color: #dcc89f; } +.ui-badge-danger { background: #fce5e5; color: #5c2626; border-color: #d9a5a5; } + +[data-theme='dark'] .ui-badge-success { background: rgba(124, 198, 163, 0.2); color: #a3e4d6; border-color: rgba(124, 198, 163, 0.4); } +[data-theme='dark'] .ui-badge-warning { background: rgba(215, 188, 120, 0.2); color: #e0c884; border-color: rgba(215, 188, 120, 0.4); } +[data-theme='dark'] .ui-badge-danger { background: rgba(222, 152, 152, 0.2); color: #f0a5a5; border-color: rgba(222, 152, 152, 0.4); } + +.ui-progress { + width: 100%; + height: 9px; + border-radius: 999px; + overflow: hidden; + background: color-mix(in srgb, var(--panel-alt) 72%, var(--line)); +} + +.ui-progress-indicator { + height: 100%; + background: var(--accent); +} + +.ui-separator-horizontal { + height: 1px; + background: var(--line); + width: 100%; +} + +.ui-separator-vertical { + width: 1px; + background: var(--line); + align-self: stretch; +} + +.ui-input { + width: 100%; + border: 1px solid var(--line); + border-radius: 4px; + padding: 8px 10px; + background: var(--panel); + color: var(--text); + font: inherit; +} + +.ui-input:focus { + outline: none; + border-color: var(--tone-500); +} + +.ui-textarea { + min-height: 150px; + resize: vertical; +} + +.ui-code { + background: color-mix(in 
srgb, var(--panel-alt) 76%, #0f1318); + color: color-mix(in srgb, var(--text) 88%, #c7d0db); + border: 1px solid var(--line-strong); + border-radius: 12px; + padding: 12px; + overflow: auto; + line-height: 1.5; +} + +.table-wrap { overflow-x: auto; } + +table { + width: 100%; + border-collapse: collapse; +} + +th, +td { + padding: 8px 8px; + border-bottom: 1px solid var(--line); + text-align: left; + font-size: 13px; +} + +th { + color: var(--text-faint); + font-size: 11px; + text-transform: uppercase; + letter-spacing: 0.06em; + background: color-mix(in srgb, var(--panel-alt) 75%, transparent); +} + +.table-click { cursor: pointer; } +.table-click:hover { background: var(--panel-muted); } + +tbody tr:nth-child(even):not(.table-click:hover) { + background: color-mix(in srgb, var(--panel-alt) 35%, transparent); +} + +.dashboard-page { + display: grid; + gap: 9px; +} + +.dashboard-loading { + display: grid; + gap: 10px; + justify-items: center; + color: var(--text-soft); + padding: 60px 0; +} + +.spinner { + width: 26px; + height: 26px; + border-radius: 999px; + border: 3px solid #d8dce1; + border-top-color: #4d5763; + animation: spin 1s linear infinite; +} + +.dashboard-header { + display: flex; + justify-content: space-between; + gap: 8px; + flex-wrap: wrap; +} + +.dashboard-header-copy h1 { + margin: 0; + font-size: 20px; + letter-spacing: -0.03em; +} + +.dashboard-eyebrow { + display: inline-block; + font-size: 11px; + color: var(--text-faint); + text-transform: uppercase; + letter-spacing: 0.1em; + margin-bottom: 3px; +} + +.dashboard-header-copy p { + color: var(--text-soft); + margin-top: 5px; +} + +.dashboard-header-meta { + display: flex; + gap: 8px; + flex-wrap: wrap; +} + +.dashboard-metric-grid { + display: grid; + grid-template-columns: repeat(4, minmax(0, 1fr)); + gap: 6px; +} + +.dashboard-metric-card { + display: grid; + gap: 6px; +} + +.dashboard-metric-top { + display: flex; + justify-content: space-between; + color: var(--text-soft); + font-size: 
13px; +} + +.dashboard-metric-value { + font-size: 20px; + font-weight: 800; + letter-spacing: -0.04em; +} + +.dashboard-metric-detail { + color: var(--text-faint); + font-size: 12px; +} + +.dashboard-main-grid { + display: grid; + grid-template-columns: 1.4fr 1fr; + gap: 6px; +} + +.dashboard-kpi-strip { + border: 1px solid var(--line); + border-radius: 6px; + background: var(--panel); + display: grid; + grid-template-columns: repeat(4, minmax(0, 1fr)); +} + +.dashboard-kpi-strip > div { + padding: 6px 8px; + border-right: 1px solid var(--line); + display: grid; + gap: 3px; +} + +.dashboard-kpi-strip > div:last-child { + border-right: 0; +} + +.dashboard-kpi-strip span { + font-size: 11px; + color: var(--text-faint); + text-transform: uppercase; + letter-spacing: 0.06em; +} + +.dashboard-kpi-strip strong { + font-size: 17px; + letter-spacing: -0.02em; +} + +.dashboard-chart-legend, +.dashboard-progress-row { + display: flex; + align-items: center; + justify-content: space-between; + gap: 8px; +} + +.dashboard-chart-legend span { + display: inline-flex; + align-items: center; + gap: 4px; + color: var(--text-soft); + font-size: 12px; +} + +.dashboard-chart-wrap { + margin-top: 6px; +} + +.dashboard-progress { + max-width: 280px; +} + +.dashboard-feed-list, +.dashboard-events-list { + display: grid; + gap: 6px; +} + +.dashboard-feed-list { + max-height: 300px; + overflow-y: auto; + overflow-x: hidden; + padding-right: 4px; +} + +.dashboard-empty { + color: var(--text-faint); + border: 1px dashed var(--line-strong); + border-radius: 4px; + padding: 10px; + text-align: center; +} + +.dashboard-feed-item, +.dashboard-event-row { + border: 1px solid var(--line); + background: var(--panel-muted); + border-radius: 4px; + padding: 6px; +} + +.dashboard-event-row { + display: flex; + justify-content: space-between; + width: 100%; + text-align: left; + cursor: pointer; +} + +.dashboard-event-row:hover { + border-color: var(--line-strong); +} + +.dashboard-event-row, 
+.simulate-scenario, +.history-search-wrap, +.approvals-surface { + transition: border-color 140ms ease, background-color 140ms ease, transform 120ms ease; +} + +.simulate-scenario:hover, +.dashboard-event-row:hover { + transform: translateY(-1px); +} + +.dashboard-feed-type, +.dashboard-event-main p { + margin: 0; + font-weight: 700; + font-size: 13px; +} + +.dashboard-feed-repo, +.dashboard-event-main span, +.dashboard-event-meta span { + color: var(--text-faint); + font-size: 12px; +} + +.dashboard-event-main, +.dashboard-event-meta { + display: grid; + gap: 4px; +} + +.dashboard-events-list-head { + display: flex; + justify-content: space-between; + color: var(--text-faint); + font-size: 11px; + text-transform: uppercase; + letter-spacing: 0.06em; +} + +.dashboard-events-table-wrap { + max-height: 320px; + overflow: auto; +} + +.dashboard-events-table { + width: 100%; + border-collapse: collapse; +} + +.dashboard-events-table th, +.dashboard-events-table td { + padding: 7px; + border-bottom: 1px solid var(--line); + font-size: 13px; + text-align: left; +} + +.dashboard-events-table th { + color: var(--text-faint); + font-size: 11px; + text-transform: uppercase; + letter-spacing: 0.06em; + background: color-mix(in srgb, var(--panel-alt) 75%, transparent); +} + +.dashboard-events-header { + display: flex; + align-items: center; + justify-content: space-between; + gap: 10px; + flex-wrap: wrap; +} + +.dashboard-health-strip { + border: 1px solid var(--line); + border-radius: var(--radius-md); + padding: 7px 10px; + background: var(--panel-muted); + display: flex; + justify-content: space-between; +} + +.dashboard-repo-list { + display: grid; + gap: 6px; + max-height: 260px; + overflow-y: auto; + overflow-x: hidden; + padding-right: 4px; +} + +.dashboard-repo-item { + border: 1px solid var(--line); + background: var(--panel-muted); + border-radius: 4px; + padding: 7px; + display: flex; + align-items: center; + justify-content: space-between; + gap: 10px; +} + 
+.dashboard-repo-item p { + margin: 0; + font-size: 13px; + font-weight: 700; +} + +.dashboard-repo-item span { + color: var(--text-faint); + font-size: 12px; +} + +.dashboard-tooltip { + background: var(--panel-alt); + border: 1px solid var(--line-strong); + color: var(--text); + border-radius: 10px; + padding: 8px 10px; + font-size: 12px; +} + +.dashboard-tooltip-label { + color: var(--text-faint); + margin-bottom: 6px; +} + +.dashboard-tooltip-row { + display: flex; + align-items: center; + justify-content: space-between; + gap: 8px; +} + +.dashboard-tooltip-dot { + width: 7px; + height: 7px; + border-radius: 999px; +} + +.history-filters { + display: grid; + gap: 7px; +} + +.history-search-wrap { + display: flex; + align-items: center; + gap: 8px; + border: 1px solid var(--line); + border-radius: 4px; + padding: 0 10px; + background: color-mix(in srgb, var(--panel) 82%, var(--panel-alt)); +} + +.history-search-wrap .ui-input { + border: 0; + padding-left: 0; +} + +.history-pills { + display: flex; + flex-wrap: wrap; + gap: 6px; +} + +.history-repo-cell { + display: grid; + gap: 2px; +} + +.history-repo-cell span { + font-size: 11px; + color: var(--text-faint); +} + +.history-branch-cell { + display: inline-flex; + align-items: center; + gap: 5px; +} + +.approvals-item-head { + display: flex; + justify-content: space-between; + gap: 8px; + flex-wrap: wrap; + margin-bottom: 8px; +} + +.approvals-item-title-row { + display: flex; + align-items: center; + gap: 8px; + flex-wrap: wrap; +} + +.approvals-item-title-row h3 { + margin: 0; + font-size: 16px; +} + +.approvals-item-head p { + margin: 4px 0 0; + color: var(--text-soft); + font-size: 13px; +} + +.approvals-item-meta { + display: grid; + justify-items: end; + gap: 4px; +} + +.approvals-item-meta span { + display: inline-flex; + align-items: center; + gap: 5px; + font-size: 12px; + color: var(--text-faint); +} + +.approvals-item-grid { + display: grid; + grid-template-columns: 1fr 1fr; + gap: 8px; + 
margin-bottom: 8px; +} + +.approvals-label { + margin: 0 0 5px; + color: var(--text-faint); + font-size: 11px; + text-transform: uppercase; + letter-spacing: 0.06em; +} + +.approvals-surface { + background: var(--panel-muted); + border: 1px solid var(--line); + border-radius: 4px; + padding: 8px; + font-size: 13px; + line-height: 1.5; +} + +.approvals-risk-row { + display: grid; + grid-template-columns: 130px 1fr; + align-items: center; + gap: 8px; + margin-bottom: 8px; +} + +.approvals-risk-value { + font-size: 24px; + font-weight: 800; + letter-spacing: -0.03em; +} + +.approvals-risks { + display: grid; + gap: 4px; + margin-bottom: 8px; +} + +.approvals-risks div { + display: flex; + align-items: flex-start; + gap: 6px; + font-size: 12px; + color: var(--text-soft); +} + +.approvals-code { + margin: 8px 0; + white-space: pre-wrap; + word-break: break-word; + background: color-mix(in srgb, var(--panel-alt) 76%, #0f1318); + color: color-mix(in srgb, var(--text) 88%, #c7d0db); + border: 1px solid var(--line-strong); + border-radius: 4px; + padding: 12px; +} + +.approvals-timing-row { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 8px; +} + +.approvals-timing-value { + display: inline-flex; + align-items: center; + gap: 8px; +} + +.approvals-timing-value strong { + font-size: 14px; +} + +.approvals-script-editor { + margin: 8px 0; +} + +.approvals-script-head { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 6px; +} + +.approvals-code-editor { + width: 100%; + font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, Liberation Mono, Courier New, monospace; + background: color-mix(in srgb, var(--panel-alt) 76%, #0f1318); + color: color-mix(in srgb, var(--text) 88%, #c7d0db); + border: 1px solid var(--line-strong); + border-radius: 4px; + padding: 12px; + resize: vertical; + line-height: 1.45; +} + +.approvals-loading-note { + margin-top: 10px; + color: var(--text-faint); + 
font-size: 12px; +} + +.approvals-actions { + margin-top: 8px; + display: flex; + gap: 6px; +} + +.simulate-command-bar { + display: grid; + grid-template-columns: repeat(4, minmax(0, 1fr)); + gap: 8px; +} + +.simulate-command-bar div { + border: 1px solid var(--line); + background: var(--panel-muted); + border-radius: 4px; + padding: 9px 10px; + display: grid; + gap: 2px; +} + +.simulate-command-bar span { + font-size: 11px; + color: var(--text-soft); + text-transform: uppercase; + letter-spacing: 0.04em; +} + +.simulate-command-bar strong { + font-size: 13px; + color: var(--text); +} + +.simulate-shell { + display: grid; + gap: 16px; +} + +.simulate-stage { + display: grid; + gap: 10px; +} + +.simulate-stage-head { + display: grid; + gap: 4px; +} + +.simulate-stage-head h2 { + margin: 0; + font-size: 15px; + letter-spacing: -0.01em; +} + +.simulate-stage-head p { + margin: 0; + font-size: 12px; + color: var(--text-soft); +} + +.simulate-grid { + display: grid; + gap: 0; + border: 1px solid var(--line-strong); + border-radius: 6px; + overflow: hidden; + background: var(--panel); +} + +.simulate-grid-primary, +.simulate-grid-insights { + grid-template-columns: minmax(0, 1fr) minmax(0, 1fr); +} + +.simulate-panel { + border: none; + background: transparent; + border-radius: 0; + padding: 12px 14px; +} + +.simulate-grid .simulate-panel + .simulate-panel { + border-left: 1px solid var(--line); +} + +.simulate-panel-header { + display: grid; + gap: 6px; + padding: 0 0 12px; +} + +.simulate-panel-title { + margin: 0; + display: flex; + align-items: center; + gap: 8px; + font-size: 16px; + letter-spacing: -0.01em; +} + +.simulate-panel-description { + margin: 0; + font-size: 12px; + color: var(--text-soft); +} + +.simulate-panel-body { + display: grid; + gap: 8px; +} + +.simulate-scenarios { + display: grid; + gap: 6px; +} + +.simulate-scenario { + border: 1px solid var(--line); + background: var(--panel-muted); + border-radius: 4px; + display: grid; + 
grid-template-columns: 30px 1fr auto; + align-items: center; + gap: 8px; + padding: 7px; + text-align: left; + cursor: pointer; +} + +.simulate-scenario.active { + border-color: var(--line-strong); + background: var(--panel-alt); +} + +.simulate-scenario-icon { + width: 28px; + height: 28px; + border-radius: 4px; + background: var(--panel); + border: 1px solid var(--line); + display: grid; + place-items: center; +} + +.simulate-scenario strong { + font-size: 13px; +} + +.simulate-scenario p { + margin: 3px 0 0; + font-size: 12px; + color: var(--text-soft); +} + +.simulate-form, +.simulate-input-grid { + display: grid; + gap: 8px; +} + +.simulate-form-top { + display: flex; + justify-content: space-between; + align-items: center; + gap: 6px; + flex-wrap: wrap; +} + +.simulate-input-grid { + grid-template-columns: 1fr 1fr; +} + +.simulate-input-grid label, +.simulate-form label { + display: grid; + gap: 4px; +} + +.simulate-input-grid span, +.simulate-form label > span { + font-size: 12px; + color: var(--text-soft); +} + +.simulate-result { + border-radius: 4px; + border: 1px solid var(--line); + display: flex; + align-items: center; + gap: 6px; + padding: 8px; + font-size: 13px; +} + +.simulate-result.success { + background: color-mix(in srgb, var(--ok) 14%, var(--panel)); + border-color: color-mix(in srgb, var(--ok) 42%, var(--line)); + color: var(--ok); +} + +.simulate-result.error { + background: color-mix(in srgb, var(--danger) 14%, var(--panel)); + border-color: color-mix(in srgb, var(--danger) 42%, var(--line)); + color: var(--danger); +} + +.simulate-diagnosis { + display: flex; + flex-direction: column; + gap: 12px; + font-size: 13px; +} + +.simulate-placeholder { + border: 1px dashed var(--line-strong); + border-radius: 4px; + padding: 10px; + display: flex; + align-items: center; + gap: 8px; + color: var(--text-soft); + font-size: 13px; +} + +.modal-overlay { + position: fixed; + top: 0; + left: 0; + right: 0; + bottom: 0; + background: rgba(0, 0, 0, 0.5); 
+  display: flex;
+  align-items: center;
+  justify-content: center;
+  z-index: 1000;
+  backdrop-filter: blur(3px);
+}
+
+.modal-content {
+  background: var(--panel);
+  border: 1px solid var(--line);
+  border-radius: 6px;
+  width: 90%;
+  max-width: 500px;
+  max-height: 600px;
+  display: flex;
+  flex-direction: column;
+  box-shadow: 0 20px 25px rgba(0, 0, 0, 0.3);
+}
+
+.modal-header {
+  display: flex;
+  align-items: center;
+  justify-content: space-between;
+  padding: 12px 16px;
+  border-bottom: 1px solid var(--line);
+  gap: 8px;
+}
+
+.modal-header h2 {
+  display: flex;
+  align-items: center;
+  gap: 8px;
+  font-size: 1rem;
+  margin: 0;
+}
+
+.modal-close {
+  background: none;
+  border: none;
+  color: var(--text-soft);
+  cursor: pointer;
+  padding: 4px;
+  display: flex;
+  align-items: center;
+  justify-content: center;
+}
+
+.modal-close:hover {
+  color: var(--text);
+}
+
+.builder-chat {
+  flex: 1;
+  overflow-y: auto;
+  padding: 12px 16px;
+  display: flex;
+  flex-direction: column;
+  gap: 12px;
+  font-size: 13px;
+}
+
+.builder-msg {
+  display: flex;
+  gap: 8px;
+  align-items: flex-start;
+}
+
+.builder-msg-assistant {
+  justify-content: flex-start;
+}
+
+.builder-msg-user {
+  justify-content: flex-end;
+}
+
+.builder-msg-avatar {
+  display: flex;
+  align-items: center;
+  justify-content: center;
+  width: 24px;
+  height: 24px;
+  flex-shrink: 0;
+  color: var(--text-soft);
+}
+
+.builder-msg-content {
+  background: var(--panel-muted);
+  border-radius: 4px;
+  padding: 8px 10px;
+  max-width: 70%;
+  word-wrap: break-word;
+  line-height: 1.4;
+}
+
+.builder-msg-user .builder-msg-content {
+  background: var(--accent);
+  color: #000;
+}
+
+.builder-input {
+  display: flex;
+  gap: 8px;
+  padding: 12px 16px;
+  border-top: 1px solid var(--line);
+}
+
+.builder-input input {
+  flex: 1;
+  background: var(--panel-muted);
+  border: 1px solid var(--line);
+  border-radius: 4px;
+  padding: 8px 10px;
+  color: var(--text);
+  font-size: 13px;
+}
+
+.builder-input input:focus {
+ 
outline: none; + border-color: var(--accent); +} + +.event-flow { + display: flex; + gap: 6px; + flex-wrap: wrap; +} + +.event-flow-item { + display: inline-flex; + align-items: center; + gap: 5px; + border: 1px solid var(--line); + border-radius: 4px; + padding: 4px 7px; + font-size: 12px; + background: color-mix(in srgb, var(--panel) 86%, transparent); +} + +[data-theme='dark'] .ui-button-success { + background: color-mix(in srgb, var(--ok) 18%, var(--panel)); + border-color: color-mix(in srgb, var(--ok) 35%, var(--line)); +} + +[data-theme='dark'] .ui-button-danger { + background: color-mix(in srgb, var(--danger) 18%, var(--panel)); + border-color: color-mix(in srgb, var(--danger) 35%, var(--line)); +} + +.event-flow-item.active { + border-color: var(--line-strong); + background: var(--panel); +} + +.event-flow-item.done { + background: color-mix(in srgb, var(--ok) 14%, var(--panel)); + color: var(--ok); + border-color: color-mix(in srgb, var(--ok) 42%, var(--line)); +} + +.event-detail-grid { + display: grid; + grid-template-columns: 1fr 1fr; + gap: 8px; +} + +.event-outcome-list, +.event-risk-breakdown, +.event-diagnosis-panel { + display: grid; + gap: 6px; +} + +.event-outcome-list div, +.event-risk-breakdown div { + display: flex; + justify-content: space-between; + font-size: 13px; +} + +.event-risk-panel strong { + font-size: 22px; + letter-spacing: -0.03em; +} + +.event-timeline { + display: grid; + gap: 10px; +} + +.event-timeline-item { + display: grid; + grid-template-columns: 10px 1fr; + gap: 10px; + align-items: start; +} + +.event-timeline-marker { + width: 10px; + height: 10px; + margin-top: 6px; + border-radius: 50%; + background: var(--accent); +} + +.event-timeline-content { + border: 1px solid var(--line); + border-radius: 8px; + padding: 10px 12px; + background: color-mix(in srgb, var(--panel) 92%, transparent); +} + +.event-timeline-head { + display: flex; + justify-content: space-between; + gap: 10px; + align-items: center; +} + 
+.event-timeline-meta { + display: flex; + gap: 8px; + margin-top: 4px; + font-size: 12px; + color: var(--muted); +} + +.event-timeline-content p { + margin: 8px 0 0; + font-size: 13px; + color: var(--text); + opacity: 0.9; +} + +.spin { + animation: spin 1s linear infinite; +} + +@keyframes spin { + to { transform: rotate(360deg); } +} + +@media (max-width: 1100px) { + .dashboard-metric-grid, + .dashboard-main-grid, + .simulate-grid, + .event-detail-grid, + .approvals-item-grid { + grid-template-columns: 1fr; + } + + .simulate-command-bar { + grid-template-columns: 1fr 1fr; + } + + .simulate-grid .simulate-panel + .simulate-panel { + border-left: none; + border-top: 1px solid var(--line); + } + + .app-shell { + grid-template-columns: 1fr; + } + + .app-sidebar { + position: static; + height: auto; + } + + .app-main { + padding: 10px; + } + + .dashboard-kpi-strip { + grid-template-columns: 1fr 1fr; + } +} + +@media (max-width: 760px) { + .simulate-command-bar { + grid-template-columns: 1fr; + } +} diff --git a/frontend/src/lib/utils.js b/frontend/src/lib/utils.js new file mode 100644 index 000000000..9d7d3ea7c --- /dev/null +++ b/frontend/src/lib/utils.js @@ -0,0 +1,6 @@ +import { clsx } from 'clsx' +import { twMerge } from 'tailwind-merge' + +export function cn(...inputs) { + return twMerge(clsx(inputs)) +} diff --git a/frontend/src/main.jsx b/frontend/src/main.jsx new file mode 100644 index 000000000..da11795dd --- /dev/null +++ b/frontend/src/main.jsx @@ -0,0 +1,24 @@ +import React from 'react' +import ReactDOM from 'react-dom/client' +import App from './App.jsx' +import './index.css' +import { Toaster } from 'react-hot-toast' + +ReactDOM.createRoot(document.getElementById('root')).render( + + + + +) diff --git a/frontend/src/pages/Approvals.jsx b/frontend/src/pages/Approvals.jsx new file mode 100644 index 000000000..5eec9de26 --- /dev/null +++ b/frontend/src/pages/Approvals.jsx @@ -0,0 +1,390 @@ +import React, { useCallback, useEffect, useState } from 'react' 
+import { + AlertTriangle, + CheckCircle2, + ChevronDown, + ChevronUp, + CircleGauge, + Clock3, + Eye, + FileCode2, + Shield, + ShieldCheck, + XCircle, +} from 'lucide-react' +import { formatDistanceToNow } from 'date-fns' +import toast from 'react-hot-toast' +import { Badge } from '../components/ui/badge' +import { Button } from '../components/ui/button' +import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '../components/ui/card' +import { Progress } from '../components/ui/progress' + +function riskVariant(level) { + if (level === 'low') return 'success' + if (level === 'high') return 'danger' + return 'warning' +} + +function parseApiDate(value) { + if (!value) return null + const raw = String(value) + const hasTimezone = /[zZ]$|[+-]\d{2}:\d{2}$/.test(raw) + return new Date(hasTimezone ? raw : `${raw}Z`) +} + +function ApprovalItem({ approval, onApprove, onReject, onLoadDetails }) { + const [expanded, setExpanded] = useState(false) + const [note, setNote] = useState('') + const [loading, setLoading] = useState(false) + const [details, setDetails] = useState(null) + const [detailsLoading, setDetailsLoading] = useState(false) + const [scriptDraft, setScriptDraft] = useState(approval.fix_script || '') + + useEffect(() => { + let active = true + + async function loadDetails() { + if (!expanded || details) return + setDetailsLoading(true) + const payload = await onLoadDetails(approval.id) + if (active && payload) setDetails(payload) + if (active) setDetailsLoading(false) + } + + loadDetails() + + return () => { + active = false + } + }, [expanded, details, approval.id, onLoadDetails]) + + const entry = details || approval + + useEffect(() => { + setScriptDraft(entry.fix_script || '') + }, [entry.fix_script]) + + async function handleApprove() { + setLoading(true) + await onApprove(approval.id, note, scriptDraft) + setLoading(false) + } + + async function handleReject() { + setLoading(true) + await onReject(approval.id, note) + setLoading(false) + 
} + + const riskPct = Math.round((entry.risk_score || 0) * 100) + const estimatedDuration = Math.round(entry.estimated_duration_seconds || 0) + + return ( + + +
+
+
+

{approval.repo_full_name}

+ Pending review +
+

{entry.branch} · {entry.commit_sha?.slice(0, 7) || 'unknown'}

+
+
+ + + {entry.risk_level || 'unknown'} risk + + + + {(() => { + const createdAt = parseApiDate(entry.created_at) + if (!createdAt || Number.isNaN(createdAt.getTime())) return 'unknown' + return formatDistanceToNow(createdAt, { addSuffix: true }) + })()} + +
+
+ +
+
+

Root cause

+
{entry.root_cause || 'No diagnosis available.'}
+
+
+

Proposed fix

+
{entry.proposed_fix || 'No proposed fix available.'}
+
+
+ +
+
+

Risk score

+
{riskPct}%
+
+ +
+ +
+ Estimated execution time +
+ {entry.timing_level || 'unknown'} + {estimatedDuration > 0 ? `${estimatedDuration}s` : '--'} +
+
+ + {entry.timing_reasons?.length > 0 && ( +
+ {entry.timing_reasons.map((reason, index) => ( +
+ + {reason} +
+ ))} +
+ )} + + {entry.risk_reasons?.length > 0 && ( +
+ {entry.risk_reasons.map((reason, index) => ( +
+ + {reason} +
+ ))} +
+ )} + + + + {expanded && detailsLoading &&
Loading latest approval details...
} + {expanded && ( +
+
+ Proposed script (editable before approval) + +
+