From 6499bc770ebe565fc1d54f87e1f673054d6c1d73 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sat, 11 Apr 2026 22:17:24 +0000
Subject: [PATCH 1/2] Initial plan

From 41900640c57d26fe68c10395d8be91571a7ec03d Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sat, 11 Apr 2026 22:22:46 +0000
Subject: [PATCH 2/2] chore: remove outdated, unused code and artifacts
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Remove AI tool config folders (.agents, .augment, .claude, .continue, .junie, .windsurf, .zencoder, .zenflow) that were tracked despite .gitignore
- Remove IDE artifact folders (.idea, .vscode-extension)
- Remove backup/archive directories (_safe_backup, archive)
- Remove nested git mirror directory (main/) with hundreds of temp files and git objects
- Remove unknown orphan directory (1/) and duplicate deployment repo (tradehax-repo/)
- Remove standalone services not part of Next.js app (ai/, ai-micro/)
- Remove ML/AI training data directories and JSONL files (data/, *.jsonl)
- Remove Kubernetes configs (k8s/) and Namecheap deploy scripts (deploy/)
- Remove 12 unused/outdated GitHub Actions workflows, keeping only build-check, codeql, vercel-deploy
- Remove committed secret-containing env blueprints (AI_LIVE_ENV_BLUEPRINT.env, AI_ENVIRONMENT_TEMPLATE.env, .env.vercel.production.template) -- these contained real Vercel project/secret values
- Remove 60+ excessive documentation/planning dump files
- Remove legacy configs (.gitlab-ci.yml, .htaccess, qodana.yaml, Untitled-1.yml, .lighthouserc.json)
- Remove planning docs from docs/ directory
- Remove index.html, skills-lock.json, tools/ (Python image tools)
- Update .gitignore to prevent reintroduction: add patterns for *.env, *.env.*, main/, tradehax-repo/, 1/, data/, *.jsonl, k8s/, deploy/, sync-log.txt, *.code-workspace, dist/, coverage/, .turbo/, .cache/, storybook-static/ (a sketch of the added patterns follows the diffstat below)

Agent-Logs-Url: https://github.com/DarkModder33/main/sessions/501b28cd-19d0-4c9a-9b17-73816395b530
Co-authored-by: DarkModder33 <254196519+DarkModder33@users.noreply.github.com>
---
 .../AGENTS.md | 68 - .../CLAUDE.md | 1 - .../README.md | 116 - .../supabase-postgres-best-practices/SKILL.md | 64 - .../references/_contributing.md | 171 - .../references/_sections.md | 39 - .../references/_template.md | 34 - .../references/advanced-full-text-search.md | 55 - .../references/advanced-jsonb-indexing.md | 49 - .../references/conn-idle-timeout.md | 46 - .../references/conn-limits.md | 44 - .../references/conn-pooling.md | 41 - .../references/conn-prepared-statements.md | 46 - .../references/data-batch-inserts.md | 54 - .../references/data-n-plus-one.md | 53 - .../references/data-pagination.md | 50 - .../references/data-upsert.md | 50 - .../references/lock-advisory.md | 56 - .../references/lock-deadlock-prevention.md | 68 - .../references/lock-short-transactions.md | 50 - .../references/lock-skip-locked.md | 54 - .../references/monitor-explain-analyze.md | 45 - .../references/monitor-pg-stat-statements.md | 55 - .../references/monitor-vacuum-analyze.md | 55 - .../references/query-composite-indexes.md | 44 - .../references/query-covering-indexes.md | 40 - .../references/query-index-types.md | 48 - .../references/query-missing-indexes.md | 43 - .../references/query-partial-indexes.md | 45 - .../references/schema-constraints.md | 80 - .../references/schema-data-types.md | 46 - .../references/schema-foreign-key-indexes.md | 59 
- .../schema-lowercase-identifiers.md | 55 - .../references/schema-partitioning.md | 55 - .../references/schema-primary-keys.md | 61 - .../references/security-privileges.md | 54 - .../references/security-rls-basics.md | 50 - .../references/security-rls-performance.md | 57 - .../AGENTS.md | 68 - .../CLAUDE.md | 1 - .../README.md | 116 - .../supabase-postgres-best-practices/SKILL.md | 64 - .../references/_contributing.md | 171 - .../references/_sections.md | 39 - .../references/_template.md | 34 - .../references/advanced-full-text-search.md | 55 - .../references/advanced-jsonb-indexing.md | 49 - .../references/conn-idle-timeout.md | 46 - .../references/conn-limits.md | 44 - .../references/conn-pooling.md | 41 - .../references/conn-prepared-statements.md | 46 - .../references/data-batch-inserts.md | 54 - .../references/data-n-plus-one.md | 53 - .../references/data-pagination.md | 50 - .../references/data-upsert.md | 50 - .../references/lock-advisory.md | 56 - .../references/lock-deadlock-prevention.md | 68 - .../references/lock-short-transactions.md | 50 - .../references/lock-skip-locked.md | 54 - .../references/monitor-explain-analyze.md | 45 - .../references/monitor-pg-stat-statements.md | 55 - .../references/monitor-vacuum-analyze.md | 55 - .../references/query-composite-indexes.md | 44 - .../references/query-covering-indexes.md | 40 - .../references/query-index-types.md | 48 - .../references/query-missing-indexes.md | 43 - .../references/query-partial-indexes.md | 45 - .../references/schema-constraints.md | 80 - .../references/schema-data-types.md | 46 - .../references/schema-foreign-key-indexes.md | 59 - .../schema-lowercase-identifiers.md | 55 - .../references/schema-partitioning.md | 55 - .../references/schema-primary-keys.md | 61 - .../references/security-privileges.md | 54 - .../references/security-rls-basics.md | 50 - .../references/security-rls-performance.md | 57 - .../AGENTS.md | 68 - .../CLAUDE.md | 1 - .../README.md | 116 - .../supabase-postgres-best-practices/SKILL.md | 64 - .../references/_contributing.md | 171 - .../references/_sections.md | 39 - .../references/_template.md | 34 - .../references/advanced-full-text-search.md | 55 - .../references/advanced-jsonb-indexing.md | 49 - .../references/conn-idle-timeout.md | 46 - .../references/conn-limits.md | 44 - .../references/conn-pooling.md | 41 - .../references/conn-prepared-statements.md | 46 - .../references/data-batch-inserts.md | 54 - .../references/data-n-plus-one.md | 53 - .../references/data-pagination.md | 50 - .../references/data-upsert.md | 50 - .../references/lock-advisory.md | 56 - .../references/lock-deadlock-prevention.md | 68 - .../references/lock-short-transactions.md | 50 - .../references/lock-skip-locked.md | 54 - .../references/monitor-explain-analyze.md | 45 - .../references/monitor-pg-stat-statements.md | 55 - .../references/monitor-vacuum-analyze.md | 55 - .../references/query-composite-indexes.md | 44 - .../references/query-covering-indexes.md | 40 - .../references/query-index-types.md | 48 - .../references/query-missing-indexes.md | 43 - .../references/query-partial-indexes.md | 45 - .../references/schema-constraints.md | 80 - .../references/schema-data-types.md | 46 - .../references/schema-foreign-key-indexes.md | 59 - .../schema-lowercase-identifiers.md | 55 - .../references/schema-partitioning.md | 55 - .../references/schema-primary-keys.md | 61 - .../references/security-privileges.md | 54 - .../references/security-rls-basics.md | 50 - .../references/security-rls-performance.md | 57 - .../AGENTS.md | 
68 - .../CLAUDE.md | 1 - .../README.md | 116 - .../supabase-postgres-best-practices/SKILL.md | 64 - .../references/_contributing.md | 171 - .../references/_sections.md | 39 - .../references/_template.md | 34 - .../references/advanced-full-text-search.md | 55 - .../references/advanced-jsonb-indexing.md | 49 - .../references/conn-idle-timeout.md | 46 - .../references/conn-limits.md | 44 - .../references/conn-pooling.md | 41 - .../references/conn-prepared-statements.md | 46 - .../references/data-batch-inserts.md | 54 - .../references/data-n-plus-one.md | 53 - .../references/data-pagination.md | 50 - .../references/data-upsert.md | 50 - .../references/lock-advisory.md | 56 - .../references/lock-deadlock-prevention.md | 68 - .../references/lock-short-transactions.md | 50 - .../references/lock-skip-locked.md | 54 - .../references/monitor-explain-analyze.md | 45 - .../references/monitor-pg-stat-statements.md | 55 - .../references/monitor-vacuum-analyze.md | 55 - .../references/query-composite-indexes.md | 44 - .../references/query-covering-indexes.md | 40 - .../references/query-index-types.md | 48 - .../references/query-missing-indexes.md | 43 - .../references/query-partial-indexes.md | 45 - .../references/schema-constraints.md | 80 - .../references/schema-data-types.md | 46 - .../references/schema-foreign-key-indexes.md | 59 - .../schema-lowercase-identifiers.md | 55 - .../references/schema-partitioning.md | 55 - .../references/schema-primary-keys.md | 61 - .../references/security-privileges.md | 54 - .../references/security-rls-basics.md | 50 - .../references/security-rls-performance.md | 57 - .env.vercel.production.template | 220 - .github/workflows/aggressive-proof-gate.yml | 104 - .github/workflows/ai-micro-ci.yml | 37 - .github/workflows/ethicalcheck.yml | 69 - .github/workflows/github-pages.yml | 47 - .github/workflows/hivemind-quality-gate.yml | 57 - .github/workflows/install-hooks-test.yml | 54 - .../workflows/intelligence-ingest-quality.yml | 48 - .github/workflows/lighthouse-ci.yml | 49 - .../workflows/live-delta-dataset-refresh.yml | 63 - .github/workflows/nextjs.yml.disabled | 93 - .github/workflows/qodana_code_quality.yml | 28 - .github/workflows/readiness-gate.yml | 46 - .gitignore | Bin 731 -> 755 bytes .gitlab-ci.yml | 89 - .htaccess | 24 - .idea/.gitignore | 10 - .idea/IntelliLang.xml | 151 - .idea/go.imports.xml | 11 - .idea/misc.xml | 6 - .idea/modules.xml | 8 - .idea/tradehax.iml | 9 - .idea/vcs.xml | 6 - .../AGENTS.md | 68 - .../CLAUDE.md | 1 - .../README.md | 116 - .../supabase-postgres-best-practices/SKILL.md | 64 - .../references/_contributing.md | 171 - .../references/_sections.md | 39 - .../references/_template.md | 34 - .../references/advanced-full-text-search.md | 55 - .../references/advanced-jsonb-indexing.md | 49 - .../references/conn-idle-timeout.md | 46 - .../references/conn-limits.md | 44 - .../references/conn-pooling.md | 41 - .../references/conn-prepared-statements.md | 46 - .../references/data-batch-inserts.md | 54 - .../references/data-n-plus-one.md | 53 - .../references/data-pagination.md | 50 - .../references/data-upsert.md | 50 - .../references/lock-advisory.md | 56 - .../references/lock-deadlock-prevention.md | 68 - .../references/lock-short-transactions.md | 50 - .../references/lock-skip-locked.md | 54 - .../references/monitor-explain-analyze.md | 45 - .../references/monitor-pg-stat-statements.md | 55 - .../references/monitor-vacuum-analyze.md | 55 - .../references/query-composite-indexes.md | 44 - .../references/query-covering-indexes.md | 40 - 
.../references/query-index-types.md | 48 - .../references/query-missing-indexes.md | 43 - .../references/query-partial-indexes.md | 45 - .../references/schema-constraints.md | 80 - .../references/schema-data-types.md | 46 - .../references/schema-foreign-key-indexes.md | 59 - .../schema-lowercase-identifiers.md | 55 - .../references/schema-partitioning.md | 55 - .../references/schema-primary-keys.md | 61 - .../references/security-privileges.md | 54 - .../references/security-rls-basics.md | 50 - .../references/security-rls-performance.md | 57 - .lighthouserc.json | 20 - .vscode-extension/package.json | 93 - .vscode-extension/src/extension.ts | 218 - .../AGENTS.md | 68 - .../CLAUDE.md | 1 - .../README.md | 116 - .../supabase-postgres-best-practices/SKILL.md | 64 - .../references/_contributing.md | 171 - .../references/_sections.md | 39 - .../references/_template.md | 34 - .../references/advanced-full-text-search.md | 55 - .../references/advanced-jsonb-indexing.md | 49 - .../references/conn-idle-timeout.md | 46 - .../references/conn-limits.md | 44 - .../references/conn-pooling.md | 41 - .../references/conn-prepared-statements.md | 46 - .../references/data-batch-inserts.md | 54 - .../references/data-n-plus-one.md | 53 - .../references/data-pagination.md | 50 - .../references/data-upsert.md | 50 - .../references/lock-advisory.md | 56 - .../references/lock-deadlock-prevention.md | 68 - .../references/lock-short-transactions.md | 50 - .../references/lock-skip-locked.md | 54 - .../references/monitor-explain-analyze.md | 45 - .../references/monitor-pg-stat-statements.md | 55 - .../references/monitor-vacuum-analyze.md | 55 - .../references/query-composite-indexes.md | 44 - .../references/query-covering-indexes.md | 40 - .../references/query-index-types.md | 48 - .../references/query-missing-indexes.md | 43 - .../references/query-partial-indexes.md | 45 - .../references/schema-constraints.md | 80 - .../references/schema-data-types.md | 46 - .../references/schema-foreign-key-indexes.md | 59 - .../schema-lowercase-identifiers.md | 55 - .../references/schema-partitioning.md | 55 - .../references/schema-primary-keys.md | 61 - .../references/security-privileges.md | 54 - .../references/security-rls-basics.md | 50 - .../references/security-rls-performance.md | 57 - .zencoder/rules/repo.md | 59 - .../AGENTS.md | 68 - .../CLAUDE.md | 1 - .../README.md | 116 - .../supabase-postgres-best-practices/SKILL.md | 64 - .../references/_contributing.md | 171 - .../references/_sections.md | 39 - .../references/_template.md | 34 - .../references/advanced-full-text-search.md | 55 - .../references/advanced-jsonb-indexing.md | 49 - .../references/conn-idle-timeout.md | 46 - .../references/conn-limits.md | 44 - .../references/conn-pooling.md | 41 - .../references/conn-prepared-statements.md | 46 - .../references/data-batch-inserts.md | 54 - .../references/data-n-plus-one.md | 53 - .../references/data-pagination.md | 50 - .../references/data-upsert.md | 50 - .../references/lock-advisory.md | 56 - .../references/lock-deadlock-prevention.md | 68 - .../references/lock-short-transactions.md | 50 - .../references/lock-skip-locked.md | 54 - .../references/monitor-explain-analyze.md | 45 - .../references/monitor-pg-stat-statements.md | 55 - .../references/monitor-vacuum-analyze.md | 55 - .../references/query-composite-indexes.md | 44 - .../references/query-covering-indexes.md | 40 - .../references/query-index-types.md | 48 - .../references/query-missing-indexes.md | 43 - .../references/query-partial-indexes.md | 45 - 
.../references/schema-constraints.md | 80 - .../references/schema-data-types.md | 46 - .../references/schema-foreign-key-indexes.md | 59 - .../schema-lowercase-identifiers.md | 55 - .../references/schema-partitioning.md | 55 - .../references/schema-primary-keys.md | 61 - .../references/security-privileges.md | 54 - .../references/security-rls-basics.md | 50 - .../references/security-rls-performance.md | 57 - .zenflow/settings.json | 6 - 1/tradehaxai-assistant | 1 - 90_DAY_EXECUTION_PLAN.md | 250 - AGENTS.md | 23 - AI_ENVIRONMENT_STANDARDS.md | 43 - AI_ENVIRONMENT_TEMPLATE.env | 212 - AI_LIVE_ENV_BLUEPRINT.env | 174 - AI_NAVIGATOR_IMPLEMENTATION_PLAN.md | 53 - AI_SETUP_SUMMARY.md | 56 - API_DOCUMENTATION.md | 510 - BLOG_PAGE_VERIFICATION.md | 229 - BUILD_COMPLETE.md | 408 - CLEANUP_SUMMARY.md | 235 - COMPLETE_AUTOMATION_GUIDE.md | 485 - COMPLETE_DEPLOYMENT_GUIDE.md | 15 - CUSTOM_LLM_MODEL_PLAN.md | 48 - DEPLOYMENT_CHECKLIST.md | 15 - DEPLOYMENT_FINAL_SUMMARY.md | 10 - DEPLOYMENT_FIX_CHECKLIST.md | 9 - DEPLOYMENT_FIX_SUMMARY.md | 8 - DEPLOYMENT_PATHS.md | 13 - DEPLOYMENT_QUICKSTART.md | 128 - DEPLOYMENT_READY.txt | 218 - DIGITAL_EMPIRE_STRATEGY.md | 225 - DISCORD_APP_SETUP.md | 65 - DNS_COMPARISON_TABLE.md | 235 - DNS_CONFIGURATION_SUMMARY.md | 271 - DNS_INDEX.md | 277 - DNS_INSPECTION_REPORT.md | 361 - DNS_QUICK_FIX.md | 104 - DOCS_INDEX.md | 73 - EXECUTION_SUMMARY.md | 368 - FINAL_STATUS_REPORT.md | 200 - FINTECH_PAYMENT_RAILS_SETUP.md | 105 - GITHUB_SECRETS_SETUP.md | 323 - GITHUB_SYNC_COMPLETE.md | 126 - GITLAB_AGENT_DEPLOYMENT.md | 149 - HANDOFF_BUNDLE.md | 216 - HARD_LAUNCH_RUNBOOK.md | 54 - HF_DATASET_UPLOAD.md | 153 - HF_FINE_TUNING_WORKFLOW.md | 104 - HF_INTEGRATION_GUIDE.md | 255 - HF_SETUP_GUIDE.md | 349 - IDE_AUTOMATION_WORKFLOW.md | 53 - IDE_PIPELINE_READY.md | 425 - IDE_PIPELINE_WORKFLOW.md | 419 - INTEGRATION_GUIDE.md | 381 - INTELLIGENCE_BUILD_LOG.md | 281 - KUBERNETES_DEPLOYMENT_STATUS.md | 341 - KUBERNETES_READY.md | 325 - LOCAL_REPO_WORKFLOW.md | 38 - MONETIZATION_GUIDE.md | 413 - NAMECHEAP_CPANEL_DEPLOYMENT.md | 520 - NAMECHEAP_MIGRATION_CHECKLIST.md | 80 - PERMISSIVE_CONFIG.md | 129 - PIPELINE_QUICKSTART.md | 32 - QUICK_START.md | 230 - QUICK_VISUAL_FIX.md | 251 - SETUP_VERIFICATION.md | 207 - TESTING_GUIDE.md | 347 - TRADEBOT_TRAINING_PIPELINE.md | 126 - TRADEHAX_AI_PLATFORM_SUMMARY.md | 188 - TRANSFORMATION_COMPLETE.md | 298 - Untitled-1.yml | 27 - VERCEL_BRANCH_FIX.md | 178 - VERCEL_DEPLOYMENT_TROUBLESHOOTING.md | 544 - VERCEL_DIAGNOSIS.md | 197 - VERCEL_DOMAIN_SETUP.md | 261 - VERCEL_STATIC_EXPORT_FIX.md | 193 - _safe_backup/NeuralHub.jsx | 375 - _safe_backup/massive-storage.js | 197 - _safe_backup/package.json | 23 - ai-micro/Dockerfile | 11 - ai-micro/package.json | 21 - ai-micro/src/index.ts | 59 - ai-micro/tsconfig.json | 14 - ai-training-set.jsonl | 27 - ai/server/index.ts | 20 - ai/server/model.ts | 51 - ai/server/security-middleware.ts | 69 - archive/docs/AI_LLM_INTEGRATION.md | 887 - archive/docs/AI_PROMPTS.md | 53 - archive/docs/AUDIO_DEPLOYMENT_GUIDE.md | 227 - archive/docs/CLEANUP_COMPLETE.md | 183 - archive/docs/COMPLETION_SUMMARY.md | 259 - archive/docs/CRYPTO_PAGE_SECURITY_AUDIT.md | 284 - archive/docs/CURRENT_STATUS.md | 505 - archive/docs/DELIVERABLES_SUMMARY.md | 309 - archive/docs/DEPLOYMENT.md | 79 - archive/docs/DEPLOYMENT_CHECKLIST.md | 131 - archive/docs/DEPLOYMENT_COMPLETE.md | 9 - archive/docs/DEPLOYMENT_FLOW_DIAGRAM.md | 248 - archive/docs/DEPLOYMENT_GUIDE.md | 502 - .../docs/DEPLOYMENT_IMPLEMENTATION_SUMMARY.md | 245 - 
archive/docs/DEPLOYMENT_QUICK_REF.md | 123 - archive/docs/DEPLOYMENT_SUMMARY.md | 335 - archive/docs/DEPLOYMENT_SYNC_GUIDE.md | 271 - .../docs/DEPLOYMENT_SYNC_IMPLEMENTATION.md | 235 - archive/docs/DNS_SETUP_INSTRUCTIONS.md | 321 - archive/docs/DOMAIN_SETUP.md | 503 - archive/docs/DOMAIN_SETUP_GUIDE.md | 247 - archive/docs/EMULATOR_README.md | 149 - archive/docs/FINAL_SUMMARY.md | 316 - archive/docs/HYPERBOREA_PHASE3_LAUNCH.md | 312 - archive/docs/IMPLEMENTATION_COMPLETE.md | 395 - archive/docs/IMPLEMENTATION_SUMMARY_OLD.md | 419 - archive/docs/INDEX.md | 203 - archive/docs/INTEGRATION_SUMMARY.md | 470 - archive/docs/ISSUES_AND_RESOLUTIONS.md | 479 - .../docs/ISSUE_51_IMPLEMENTATION_SUMMARY.md | 613 - archive/docs/LAUNCH_CHECKLIST.md | 284 - archive/docs/MIGRATION_SUMMARY.md | 101 - archive/docs/MONETIZATION_SETUP.md | 200 - archive/docs/NFT_IMPLEMENTATION_SUMMARY.md | 303 - archive/docs/NFT_MINT_GUIDE.md | 303 - archive/docs/PAYPAL_SETUP_GUIDE.md | 232 - archive/docs/PHASE_2_COMPLETION_REPORT.md | 346 - archive/docs/PRODUCTION_DEPLOYMENT_SUMMARY.md | 381 - archive/docs/PROJECT_STRUCTURE.md | 419 - archive/docs/QUICK_API_REFERENCE.md | 175 - archive/docs/QUICK_DEPLOY.md | 103 - archive/docs/QUICK_DEPLOY_CHECKLIST.md | 349 - archive/docs/README.md | 19 - archive/docs/README_PRODUCTION.md | 385 - archive/docs/REBUILD_COMPLETE.md | 90 - archive/docs/REBUILD_SUMMARY.md | 214 - archive/docs/ROM_LIBRARY.md | 206 - archive/docs/ROM_MANIFEST.md | 48 - archive/docs/SECURITY_AUDIT_REPORT.md | 107 - archive/docs/SECURITY_AUDIT_REPORT_2025.md | 310 - archive/docs/SECURITY_FIX.md | 69 - archive/docs/SECURITY_HARDENING.md | 383 - archive/docs/SETUP_COMPLETE.md | 101 - archive/docs/SETUP_INSTRUCTIONS_FOR_OWNER.md | 171 - archive/docs/SHAMROCK_SETUP.md | 351 - archive/docs/TASK_SYSTEM_README.md | 347 - archive/docs/TODO.md | 48 - archive/docs/VERCEL_ANALYTICS.md | 464 - archive/docs/VERCEL_API_SETUP.md | 553 - archive/docs/VERCEL_DEPLOYMENT_CHECKLIST.md | 353 - archive/docs/VERCEL_DNS_SETUP.md | 218 - archive/legacy-code/.azure/plan.copilotmd | 103 - archive/legacy-code/.zencoder/rules/repo.md | 33 - archive/legacy-code/Anchor.toml | 31 - archive/legacy-code/Makefile | 13 - archive/legacy-code/README.md | 21 - archive/legacy-code/clover-exchange.js | 281 - archive/legacy-code/clover-goals.js | 411 - archive/legacy-code/config.js | 33 - archive/legacy-code/deploy-mainnet.sh | 175 - archive/legacy-code/legacy-games/README.md | 34 - archive/legacy-code/legacy-games/convert.html | 191 - .../legacy-code/legacy-games/emulator.html | 1599 - .../legacy-games/featured-roms.html | 406 - .../legacy-code/legacy-games/games-index.html | 155 - archive/legacy-code/legacy-games/games.html | 372 - .../legacy-games/goals-dashboard.html | 738 - archive/legacy-code/legacy-games/hub.html | 350 - .../legacy-code/legacy-games/hyperborea.html | 200 - archive/legacy-code/legacy-games/index.html | 345 - .../legacy-code/legacy-games/mario-auto.html | 135 - .../legacy-code/legacy-games/mario-debug.html | 214 - .../legacy-games/mario-simple.html | 226 - .../legacy-code/legacy-games/mario-test.html | 69 - archive/legacy-code/legacy-games/mario.html | 287 - archive/legacy-code/legacy-games/memory.html | 345 - archive/legacy-code/legacy-games/play.html | 401 - .../legacy-games/quick-play-new.html | 177 - .../legacy-code/legacy-games/quick-play.html | 284 - archive/legacy-code/legacy-games/retro.html | 329 - .../legacy-code/legacy-games/snake-demo.html | 332 - archive/legacy-code/legacy-games/snake.html | 396 - 
archive/legacy-code/legacy-games/spades.html | 267 - .../legacy-code/legacy-games/test-live.html | 30 - archive/legacy-code/legacy-games/tetris.html | 590 - archive/legacy-code/legacy-games/topup.html | 173 - .../legacy-games/zelda-simple.html | 226 - .../legacy-code/legacy-games/zelda-test.html | 69 - archive/legacy-code/legacy-games/zelda.html | 277 - archive/legacy-code/main.js | 787 - archive/legacy-code/play-timer-integration.js | 204 - archive/legacy-code/play-timer.js | 278 - archive/legacy-code/program/.gitignore | 7 - archive/legacy-code/program/.prettierignore | 7 - archive/legacy-code/program/Anchor.toml | 22 - archive/legacy-code/program/Cargo.lock | 2254 - archive/legacy-code/program/Cargo.toml | 14 - archive/legacy-code/program/README.md | 139 - .../legacy-code/program/migrations/deploy.ts | 12 - archive/legacy-code/program/package.json | 20 - .../program/programs/counter/Cargo.toml | 22 - .../program/programs/counter/Xargo.toml | 2 - .../program/programs/counter/src/lib.rs | 158 - archive/legacy-code/program/tests/counter.ts | 155 - archive/legacy-code/program/tsconfig.json | 10 - archive/legacy-code/schema.json | 80 - archive/legacy-code/server.js | 15 - archive/legacy-code/spades-engine.js | 296 - archive/legacy-code/spades-game.js | 502 - archive/legacy-code/test-critical-path.js | 212 - archive/legacy-code/test-endpoints.mjs | 79 - archive/legacy-code/test-thorough.js | 457 - archive/legacy-code/update-backend-config.js | 100 - archive/legacy-code/web3-rewards.js | 525 - archive/legacy-code/wrangler.toml | 19 - archive/portfolio-old/portfolio/404.html | 11 - .../portfolio/MichaelSFlahertyResume.pdf | Bin 104856 -> 0 bytes archive/portfolio-old/portfolio/about.html | 11 - .../portfolio-old/portfolio/assets/logo.svg | 9 - .../portfolio-old/portfolio/assets/style.css | 35 - .../portfolio-old/portfolio/blog/index.html | 11 - archive/portfolio-old/portfolio/index.html | 82 - .../portfolio/pdfjs/build/pdf.js | 15995 ---- .../portfolio/pdfjs/build/pdf.sandbox.js | 282 - .../portfolio/pdfjs/build/pdf.worker.js | 65026 ---------------- .../portfolio-old/portfolio/pdfjs/viewer.html | 432 - .../portfolio/pdfjs/web/cmaps/78-EUC-H.bcmap | Bin 2404 -> 0 bytes .../portfolio/pdfjs/web/cmaps/78-EUC-V.bcmap | Bin 173 -> 0 bytes .../portfolio/pdfjs/web/cmaps/78-H.bcmap | Bin 2379 -> 0 bytes .../portfolio/pdfjs/web/cmaps/78-RKSJ-H.bcmap | Bin 2398 -> 0 bytes .../portfolio/pdfjs/web/cmaps/78-RKSJ-V.bcmap | Bin 173 -> 0 bytes .../portfolio/pdfjs/web/cmaps/78-V.bcmap | Bin 169 -> 0 bytes .../pdfjs/web/cmaps/78ms-RKSJ-H.bcmap | Bin 2651 -> 0 bytes .../pdfjs/web/cmaps/78ms-RKSJ-V.bcmap | Bin 290 -> 0 bytes .../pdfjs/web/cmaps/83pv-RKSJ-H.bcmap | Bin 905 -> 0 bytes .../pdfjs/web/cmaps/90ms-RKSJ-H.bcmap | Bin 721 -> 0 bytes .../pdfjs/web/cmaps/90ms-RKSJ-V.bcmap | Bin 290 -> 0 bytes .../pdfjs/web/cmaps/90msp-RKSJ-H.bcmap | Bin 715 -> 0 bytes .../pdfjs/web/cmaps/90msp-RKSJ-V.bcmap | Bin 291 -> 0 bytes .../pdfjs/web/cmaps/90pv-RKSJ-H.bcmap | Bin 982 -> 0 bytes .../pdfjs/web/cmaps/90pv-RKSJ-V.bcmap | Bin 260 -> 0 bytes .../portfolio/pdfjs/web/cmaps/Add-H.bcmap | Bin 2419 -> 0 bytes .../pdfjs/web/cmaps/Add-RKSJ-H.bcmap | Bin 2413 -> 0 bytes .../pdfjs/web/cmaps/Add-RKSJ-V.bcmap | Bin 287 -> 0 bytes .../portfolio/pdfjs/web/cmaps/Add-V.bcmap | Bin 282 -> 0 bytes .../pdfjs/web/cmaps/Adobe-CNS1-0.bcmap | Bin 317 -> 0 bytes .../pdfjs/web/cmaps/Adobe-CNS1-1.bcmap | Bin 371 -> 0 bytes .../pdfjs/web/cmaps/Adobe-CNS1-2.bcmap | Bin 376 -> 0 bytes .../pdfjs/web/cmaps/Adobe-CNS1-3.bcmap | Bin 401 -> 0 bytes 
.../pdfjs/web/cmaps/Adobe-CNS1-4.bcmap | Bin 405 -> 0 bytes .../pdfjs/web/cmaps/Adobe-CNS1-5.bcmap | Bin 406 -> 0 bytes .../pdfjs/web/cmaps/Adobe-CNS1-6.bcmap | Bin 406 -> 0 bytes .../pdfjs/web/cmaps/Adobe-CNS1-UCS2.bcmap | Bin 41193 -> 0 bytes .../pdfjs/web/cmaps/Adobe-GB1-0.bcmap | Bin 217 -> 0 bytes .../pdfjs/web/cmaps/Adobe-GB1-1.bcmap | Bin 250 -> 0 bytes .../pdfjs/web/cmaps/Adobe-GB1-2.bcmap | Bin 465 -> 0 bytes .../pdfjs/web/cmaps/Adobe-GB1-3.bcmap | Bin 470 -> 0 bytes .../pdfjs/web/cmaps/Adobe-GB1-4.bcmap | Bin 601 -> 0 bytes .../pdfjs/web/cmaps/Adobe-GB1-5.bcmap | Bin 625 -> 0 bytes .../pdfjs/web/cmaps/Adobe-GB1-UCS2.bcmap | Bin 33974 -> 0 bytes .../pdfjs/web/cmaps/Adobe-Japan1-0.bcmap | Bin 225 -> 0 bytes .../pdfjs/web/cmaps/Adobe-Japan1-1.bcmap | Bin 226 -> 0 bytes .../pdfjs/web/cmaps/Adobe-Japan1-2.bcmap | Bin 233 -> 0 bytes .../pdfjs/web/cmaps/Adobe-Japan1-3.bcmap | Bin 242 -> 0 bytes .../pdfjs/web/cmaps/Adobe-Japan1-4.bcmap | Bin 337 -> 0 bytes .../pdfjs/web/cmaps/Adobe-Japan1-5.bcmap | Bin 430 -> 0 bytes .../pdfjs/web/cmaps/Adobe-Japan1-6.bcmap | Bin 485 -> 0 bytes .../pdfjs/web/cmaps/Adobe-Japan1-UCS2.bcmap | Bin 40951 -> 0 bytes .../pdfjs/web/cmaps/Adobe-Korea1-0.bcmap | Bin 241 -> 0 bytes .../pdfjs/web/cmaps/Adobe-Korea1-1.bcmap | Bin 386 -> 0 bytes .../pdfjs/web/cmaps/Adobe-Korea1-2.bcmap | Bin 391 -> 0 bytes .../pdfjs/web/cmaps/Adobe-Korea1-UCS2.bcmap | Bin 23293 -> 0 bytes .../portfolio/pdfjs/web/cmaps/B5-H.bcmap | Bin 1086 -> 0 bytes .../portfolio/pdfjs/web/cmaps/B5-V.bcmap | Bin 142 -> 0 bytes .../portfolio/pdfjs/web/cmaps/B5pc-H.bcmap | Bin 1099 -> 0 bytes .../portfolio/pdfjs/web/cmaps/B5pc-V.bcmap | Bin 144 -> 0 bytes .../portfolio/pdfjs/web/cmaps/CNS-EUC-H.bcmap | Bin 1780 -> 0 bytes .../portfolio/pdfjs/web/cmaps/CNS-EUC-V.bcmap | Bin 1920 -> 0 bytes .../portfolio/pdfjs/web/cmaps/CNS1-H.bcmap | Bin 706 -> 0 bytes .../portfolio/pdfjs/web/cmaps/CNS1-V.bcmap | Bin 143 -> 0 bytes .../portfolio/pdfjs/web/cmaps/CNS2-H.bcmap | Bin 504 -> 0 bytes .../portfolio/pdfjs/web/cmaps/CNS2-V.bcmap | 3 - .../portfolio/pdfjs/web/cmaps/ETHK-B5-H.bcmap | Bin 4426 -> 0 bytes .../portfolio/pdfjs/web/cmaps/ETHK-B5-V.bcmap | Bin 158 -> 0 bytes .../portfolio/pdfjs/web/cmaps/ETen-B5-H.bcmap | Bin 1125 -> 0 bytes .../portfolio/pdfjs/web/cmaps/ETen-B5-V.bcmap | Bin 158 -> 0 bytes .../pdfjs/web/cmaps/ETenms-B5-H.bcmap | 3 - .../pdfjs/web/cmaps/ETenms-B5-V.bcmap | Bin 172 -> 0 bytes .../portfolio/pdfjs/web/cmaps/EUC-H.bcmap | Bin 578 -> 0 bytes .../portfolio/pdfjs/web/cmaps/EUC-V.bcmap | Bin 170 -> 0 bytes .../portfolio/pdfjs/web/cmaps/Ext-H.bcmap | Bin 2536 -> 0 bytes .../pdfjs/web/cmaps/Ext-RKSJ-H.bcmap | Bin 2542 -> 0 bytes .../pdfjs/web/cmaps/Ext-RKSJ-V.bcmap | Bin 218 -> 0 bytes .../portfolio/pdfjs/web/cmaps/Ext-V.bcmap | Bin 215 -> 0 bytes .../portfolio/pdfjs/web/cmaps/GB-EUC-H.bcmap | Bin 549 -> 0 bytes .../portfolio/pdfjs/web/cmaps/GB-EUC-V.bcmap | Bin 179 -> 0 bytes .../portfolio/pdfjs/web/cmaps/GB-H.bcmap | 4 - .../portfolio/pdfjs/web/cmaps/GB-V.bcmap | Bin 175 -> 0 bytes .../portfolio/pdfjs/web/cmaps/GBK-EUC-H.bcmap | Bin 14692 -> 0 bytes .../portfolio/pdfjs/web/cmaps/GBK-EUC-V.bcmap | Bin 180 -> 0 bytes .../portfolio/pdfjs/web/cmaps/GBK2K-H.bcmap | Bin 19662 -> 0 bytes .../portfolio/pdfjs/web/cmaps/GBK2K-V.bcmap | Bin 219 -> 0 bytes .../pdfjs/web/cmaps/GBKp-EUC-H.bcmap | Bin 14686 -> 0 bytes .../pdfjs/web/cmaps/GBKp-EUC-V.bcmap | Bin 181 -> 0 bytes .../portfolio/pdfjs/web/cmaps/GBT-EUC-H.bcmap | Bin 7290 -> 0 bytes .../portfolio/pdfjs/web/cmaps/GBT-EUC-V.bcmap | Bin 180 -> 0 bytes 
.../portfolio/pdfjs/web/cmaps/GBT-H.bcmap | Bin 7269 -> 0 bytes .../portfolio/pdfjs/web/cmaps/GBT-V.bcmap | Bin 176 -> 0 bytes .../pdfjs/web/cmaps/GBTpc-EUC-H.bcmap | Bin 7298 -> 0 bytes .../pdfjs/web/cmaps/GBTpc-EUC-V.bcmap | Bin 182 -> 0 bytes .../pdfjs/web/cmaps/GBpc-EUC-H.bcmap | Bin 557 -> 0 bytes .../pdfjs/web/cmaps/GBpc-EUC-V.bcmap | Bin 181 -> 0 bytes .../portfolio/pdfjs/web/cmaps/H.bcmap | Bin 553 -> 0 bytes .../pdfjs/web/cmaps/HKdla-B5-H.bcmap | Bin 2654 -> 0 bytes .../pdfjs/web/cmaps/HKdla-B5-V.bcmap | Bin 148 -> 0 bytes .../pdfjs/web/cmaps/HKdlb-B5-H.bcmap | Bin 2414 -> 0 bytes .../pdfjs/web/cmaps/HKdlb-B5-V.bcmap | Bin 148 -> 0 bytes .../pdfjs/web/cmaps/HKgccs-B5-H.bcmap | Bin 2292 -> 0 bytes .../pdfjs/web/cmaps/HKgccs-B5-V.bcmap | Bin 149 -> 0 bytes .../pdfjs/web/cmaps/HKm314-B5-H.bcmap | Bin 1772 -> 0 bytes .../pdfjs/web/cmaps/HKm314-B5-V.bcmap | Bin 149 -> 0 bytes .../pdfjs/web/cmaps/HKm471-B5-H.bcmap | Bin 2171 -> 0 bytes .../pdfjs/web/cmaps/HKm471-B5-V.bcmap | Bin 149 -> 0 bytes .../pdfjs/web/cmaps/HKscs-B5-H.bcmap | Bin 4437 -> 0 bytes .../pdfjs/web/cmaps/HKscs-B5-V.bcmap | Bin 159 -> 0 bytes .../portfolio/pdfjs/web/cmaps/Hankaku.bcmap | Bin 132 -> 0 bytes .../portfolio/pdfjs/web/cmaps/Hiragana.bcmap | Bin 124 -> 0 bytes .../portfolio/pdfjs/web/cmaps/KSC-EUC-H.bcmap | Bin 1848 -> 0 bytes .../portfolio/pdfjs/web/cmaps/KSC-EUC-V.bcmap | Bin 164 -> 0 bytes .../portfolio/pdfjs/web/cmaps/KSC-H.bcmap | Bin 1831 -> 0 bytes .../pdfjs/web/cmaps/KSC-Johab-H.bcmap | Bin 16791 -> 0 bytes .../pdfjs/web/cmaps/KSC-Johab-V.bcmap | Bin 166 -> 0 bytes .../portfolio/pdfjs/web/cmaps/KSC-V.bcmap | Bin 160 -> 0 bytes .../pdfjs/web/cmaps/KSCms-UHC-H.bcmap | Bin 2787 -> 0 bytes .../pdfjs/web/cmaps/KSCms-UHC-HW-H.bcmap | Bin 2789 -> 0 bytes .../pdfjs/web/cmaps/KSCms-UHC-HW-V.bcmap | Bin 169 -> 0 bytes .../pdfjs/web/cmaps/KSCms-UHC-V.bcmap | Bin 166 -> 0 bytes .../pdfjs/web/cmaps/KSCpc-EUC-H.bcmap | Bin 2024 -> 0 bytes .../pdfjs/web/cmaps/KSCpc-EUC-V.bcmap | Bin 166 -> 0 bytes .../portfolio/pdfjs/web/cmaps/Katakana.bcmap | Bin 100 -> 0 bytes .../portfolio/pdfjs/web/cmaps/LICENSE | 36 - .../portfolio/pdfjs/web/cmaps/NWP-H.bcmap | Bin 2765 -> 0 bytes .../portfolio/pdfjs/web/cmaps/NWP-V.bcmap | Bin 252 -> 0 bytes .../portfolio/pdfjs/web/cmaps/RKSJ-H.bcmap | Bin 534 -> 0 bytes .../portfolio/pdfjs/web/cmaps/RKSJ-V.bcmap | Bin 170 -> 0 bytes .../portfolio/pdfjs/web/cmaps/Roman.bcmap | Bin 96 -> 0 bytes .../pdfjs/web/cmaps/UniCNS-UCS2-H.bcmap | Bin 48280 -> 0 bytes .../pdfjs/web/cmaps/UniCNS-UCS2-V.bcmap | Bin 156 -> 0 bytes .../pdfjs/web/cmaps/UniCNS-UTF16-H.bcmap | Bin 50419 -> 0 bytes .../pdfjs/web/cmaps/UniCNS-UTF16-V.bcmap | Bin 156 -> 0 bytes .../pdfjs/web/cmaps/UniCNS-UTF32-H.bcmap | Bin 52679 -> 0 bytes .../pdfjs/web/cmaps/UniCNS-UTF32-V.bcmap | Bin 160 -> 0 bytes .../pdfjs/web/cmaps/UniCNS-UTF8-H.bcmap | Bin 53629 -> 0 bytes .../pdfjs/web/cmaps/UniCNS-UTF8-V.bcmap | Bin 157 -> 0 bytes .../pdfjs/web/cmaps/UniGB-UCS2-H.bcmap | Bin 43366 -> 0 bytes .../pdfjs/web/cmaps/UniGB-UCS2-V.bcmap | Bin 193 -> 0 bytes .../pdfjs/web/cmaps/UniGB-UTF16-H.bcmap | Bin 44086 -> 0 bytes .../pdfjs/web/cmaps/UniGB-UTF16-V.bcmap | Bin 178 -> 0 bytes .../pdfjs/web/cmaps/UniGB-UTF32-H.bcmap | Bin 45738 -> 0 bytes .../pdfjs/web/cmaps/UniGB-UTF32-V.bcmap | Bin 182 -> 0 bytes .../pdfjs/web/cmaps/UniGB-UTF8-H.bcmap | Bin 46837 -> 0 bytes .../pdfjs/web/cmaps/UniGB-UTF8-V.bcmap | Bin 181 -> 0 bytes .../pdfjs/web/cmaps/UniJIS-UCS2-H.bcmap | Bin 25439 -> 0 bytes .../pdfjs/web/cmaps/UniJIS-UCS2-HW-H.bcmap | Bin 119 -> 0 bytes 
.../pdfjs/web/cmaps/UniJIS-UCS2-HW-V.bcmap | Bin 680 -> 0 bytes .../pdfjs/web/cmaps/UniJIS-UCS2-V.bcmap | Bin 664 -> 0 bytes .../pdfjs/web/cmaps/UniJIS-UTF16-H.bcmap | Bin 39443 -> 0 bytes .../pdfjs/web/cmaps/UniJIS-UTF16-V.bcmap | Bin 643 -> 0 bytes .../pdfjs/web/cmaps/UniJIS-UTF32-H.bcmap | Bin 40539 -> 0 bytes .../pdfjs/web/cmaps/UniJIS-UTF32-V.bcmap | Bin 677 -> 0 bytes .../pdfjs/web/cmaps/UniJIS-UTF8-H.bcmap | Bin 41695 -> 0 bytes .../pdfjs/web/cmaps/UniJIS-UTF8-V.bcmap | Bin 678 -> 0 bytes .../pdfjs/web/cmaps/UniJIS2004-UTF16-H.bcmap | Bin 39534 -> 0 bytes .../pdfjs/web/cmaps/UniJIS2004-UTF16-V.bcmap | Bin 647 -> 0 bytes .../pdfjs/web/cmaps/UniJIS2004-UTF32-H.bcmap | Bin 40630 -> 0 bytes .../pdfjs/web/cmaps/UniJIS2004-UTF32-V.bcmap | Bin 681 -> 0 bytes .../pdfjs/web/cmaps/UniJIS2004-UTF8-H.bcmap | Bin 41779 -> 0 bytes .../pdfjs/web/cmaps/UniJIS2004-UTF8-V.bcmap | Bin 682 -> 0 bytes .../pdfjs/web/cmaps/UniJISPro-UCS2-HW-V.bcmap | Bin 705 -> 0 bytes .../pdfjs/web/cmaps/UniJISPro-UCS2-V.bcmap | Bin 689 -> 0 bytes .../pdfjs/web/cmaps/UniJISPro-UTF8-V.bcmap | Bin 726 -> 0 bytes .../pdfjs/web/cmaps/UniJISX0213-UTF32-H.bcmap | Bin 40517 -> 0 bytes .../pdfjs/web/cmaps/UniJISX0213-UTF32-V.bcmap | Bin 684 -> 0 bytes .../web/cmaps/UniJISX02132004-UTF32-H.bcmap | Bin 40608 -> 0 bytes .../web/cmaps/UniJISX02132004-UTF32-V.bcmap | Bin 688 -> 0 bytes .../pdfjs/web/cmaps/UniKS-UCS2-H.bcmap | Bin 25783 -> 0 bytes .../pdfjs/web/cmaps/UniKS-UCS2-V.bcmap | Bin 178 -> 0 bytes .../pdfjs/web/cmaps/UniKS-UTF16-H.bcmap | Bin 26327 -> 0 bytes .../pdfjs/web/cmaps/UniKS-UTF16-V.bcmap | Bin 164 -> 0 bytes .../pdfjs/web/cmaps/UniKS-UTF32-H.bcmap | Bin 26451 -> 0 bytes .../pdfjs/web/cmaps/UniKS-UTF32-V.bcmap | Bin 168 -> 0 bytes .../pdfjs/web/cmaps/UniKS-UTF8-H.bcmap | Bin 27790 -> 0 bytes .../pdfjs/web/cmaps/UniKS-UTF8-V.bcmap | Bin 169 -> 0 bytes .../portfolio/pdfjs/web/cmaps/V.bcmap | Bin 166 -> 0 bytes .../portfolio/pdfjs/web/cmaps/WP-Symbol.bcmap | Bin 179 -> 0 bytes .../pdfjs/web/images/annotation-check.svg | 11 - .../pdfjs/web/images/annotation-comment.svg | 16 - .../pdfjs/web/images/annotation-help.svg | 26 - .../pdfjs/web/images/annotation-insert.svg | 10 - .../pdfjs/web/images/annotation-key.svg | 11 - .../web/images/annotation-newparagraph.svg | 11 - .../pdfjs/web/images/annotation-noicon.svg | 7 - .../pdfjs/web/images/annotation-note.svg | 42 - .../pdfjs/web/images/annotation-paperclip.svg | 6 - .../pdfjs/web/images/annotation-paragraph.svg | 16 - .../pdfjs/web/images/annotation-pushpin.svg | 7 - .../web/images/cursor-editorFreeText.svg | 3 - .../pdfjs/web/images/cursor-editorInk.svg | 4 - .../pdfjs/web/images/findbarButton-next.svg | 3 - .../web/images/findbarButton-previous.svg | 3 - .../pdfjs/web/images/loading-dark.svg | 24 - .../pdfjs/web/images/loading-icon.gif | Bin 2536 -> 0 bytes .../portfolio/pdfjs/web/images/loading.svg | 1 - ...ondaryToolbarButton-documentProperties.svg | 3 - .../secondaryToolbarButton-firstPage.svg | 3 - .../secondaryToolbarButton-handTool.svg | 3 - .../secondaryToolbarButton-lastPage.svg | 3 - .../secondaryToolbarButton-rotateCcw.svg | 3 - .../secondaryToolbarButton-rotateCw.svg | 3 - ...econdaryToolbarButton-scrollHorizontal.svg | 3 - .../secondaryToolbarButton-scrollPage.svg | 3 - .../secondaryToolbarButton-scrollVertical.svg | 3 - .../secondaryToolbarButton-scrollWrapped.svg | 3 - .../secondaryToolbarButton-selectTool.svg | 3 - .../secondaryToolbarButton-spreadEven.svg | 3 - .../secondaryToolbarButton-spreadNone.svg | 3 - .../secondaryToolbarButton-spreadOdd.svg | 3 - 
.../web/images/toolbarButton-bookmark.svg | 3 - .../toolbarButton-currentOutlineItem.svg | 3 - .../web/images/toolbarButton-download.svg | 4 - .../images/toolbarButton-editorFreeText.svg | 3 - .../web/images/toolbarButton-editorInk.svg | 4 - .../web/images/toolbarButton-menuArrow.svg | 3 - .../web/images/toolbarButton-openFile.svg | 3 - .../web/images/toolbarButton-pageDown.svg | 3 - .../pdfjs/web/images/toolbarButton-pageUp.svg | 3 - .../images/toolbarButton-presentationMode.svg | 3 - .../pdfjs/web/images/toolbarButton-print.svg | 3 - .../pdfjs/web/images/toolbarButton-search.svg | 3 - .../toolbarButton-secondaryToolbarToggle.svg | 3 - .../images/toolbarButton-sidebarToggle.svg | 3 - .../images/toolbarButton-viewAttachments.svg | 3 - .../web/images/toolbarButton-viewLayers.svg | 3 - .../web/images/toolbarButton-viewOutline.svg | 3 - .../images/toolbarButton-viewThumbnail.svg | 3 - .../pdfjs/web/images/toolbarButton-zoomIn.svg | 3 - .../web/images/toolbarButton-zoomOut.svg | 3 - .../pdfjs/web/images/treeitem-collapsed.svg | 1 - .../pdfjs/web/images/treeitem-expanded.svg | 1 - .../pdfjs/web/locale/ach/viewer.properties | 200 - .../pdfjs/web/locale/af/viewer.properties | 177 - .../pdfjs/web/locale/an/viewer.properties | 243 - .../pdfjs/web/locale/ar/viewer.properties | 247 - .../pdfjs/web/locale/ast/viewer.properties | 207 - .../pdfjs/web/locale/az/viewer.properties | 243 - .../pdfjs/web/locale/be/viewer.properties | 277 - .../pdfjs/web/locale/bg/viewer.properties | 235 - .../pdfjs/web/locale/bn/viewer.properties | 239 - .../pdfjs/web/locale/bo/viewer.properties | 238 - .../pdfjs/web/locale/br/viewer.properties | 247 - .../pdfjs/web/locale/brx/viewer.properties | 205 - .../pdfjs/web/locale/bs/viewer.properties | 194 - .../pdfjs/web/locale/ca/viewer.properties | 279 - .../pdfjs/web/locale/cak/viewer.properties | 275 - .../pdfjs/web/locale/ckb/viewer.properties | 234 - .../pdfjs/web/locale/cs/viewer.properties | 277 - .../pdfjs/web/locale/cy/viewer.properties | 279 - .../pdfjs/web/locale/da/viewer.properties | 277 - .../pdfjs/web/locale/de/viewer.properties | 277 - .../pdfjs/web/locale/dsb/viewer.properties | 279 - .../pdfjs/web/locale/el/viewer.properties | 279 - .../pdfjs/web/locale/en-CA/viewer.properties | 279 - .../pdfjs/web/locale/en-GB/viewer.properties | 277 - .../pdfjs/web/locale/en-US/viewer.properties | 252 - .../pdfjs/web/locale/eo/viewer.properties | 279 - .../pdfjs/web/locale/es-AR/viewer.properties | 277 - .../pdfjs/web/locale/es-CL/viewer.properties | 277 - .../pdfjs/web/locale/es-ES/viewer.properties | 277 - .../pdfjs/web/locale/es-MX/viewer.properties | 277 - .../pdfjs/web/locale/et/viewer.properties | 262 - .../pdfjs/web/locale/eu/viewer.properties | 277 - .../pdfjs/web/locale/fa/viewer.properties | 216 - .../pdfjs/web/locale/ff/viewer.properties | 235 - .../pdfjs/web/locale/fi/viewer.properties | 279 - .../pdfjs/web/locale/fr/viewer.properties | 277 - .../pdfjs/web/locale/fur/viewer.properties | 277 - .../pdfjs/web/locale/fy-NL/viewer.properties | 277 - .../pdfjs/web/locale/ga-IE/viewer.properties | 202 - .../pdfjs/web/locale/gd/viewer.properties | 249 - .../pdfjs/web/locale/gl/viewer.properties | 249 - .../pdfjs/web/locale/gn/viewer.properties | 279 - .../pdfjs/web/locale/gu-IN/viewer.properties | 235 - .../pdfjs/web/locale/he/viewer.properties | 276 - .../pdfjs/web/locale/hi-IN/viewer.properties | 235 - .../pdfjs/web/locale/hr/viewer.properties | 267 - .../pdfjs/web/locale/hsb/viewer.properties | 279 - .../pdfjs/web/locale/hu/viewer.properties | 277 - 
.../pdfjs/web/locale/hy-AM/viewer.properties | 251 - .../pdfjs/web/locale/hye/viewer.properties | 252 - .../pdfjs/web/locale/ia/viewer.properties | 277 - .../pdfjs/web/locale/id/viewer.properties | 277 - .../pdfjs/web/locale/is/viewer.properties | 277 - .../pdfjs/web/locale/it/viewer.properties | 217 - .../pdfjs/web/locale/ja/viewer.properties | 279 - .../pdfjs/web/locale/ka/viewer.properties | 277 - .../pdfjs/web/locale/kab/viewer.properties | 279 - .../pdfjs/web/locale/kk/viewer.properties | 277 - .../pdfjs/web/locale/km/viewer.properties | 210 - .../pdfjs/web/locale/kn/viewer.properties | 187 - .../pdfjs/web/locale/ko/viewer.properties | 277 - .../pdfjs/web/locale/lij/viewer.properties | 235 - .../pdfjs/web/locale/lo/viewer.properties | 145 - .../pdfjs/web/locale/locale.properties | 333 - .../pdfjs/web/locale/lt/viewer.properties | 260 - .../pdfjs/web/locale/ltg/viewer.properties | 213 - .../pdfjs/web/locale/lv/viewer.properties | 235 - .../pdfjs/web/locale/meh/viewer.properties | 111 - .../pdfjs/web/locale/mk/viewer.properties | 139 - .../pdfjs/web/locale/mr/viewer.properties | 231 - .../pdfjs/web/locale/ms/viewer.properties | 235 - .../pdfjs/web/locale/my/viewer.properties | 191 - .../pdfjs/web/locale/nb-NO/viewer.properties | 277 - .../pdfjs/web/locale/ne-NP/viewer.properties | 218 - .../pdfjs/web/locale/nl/viewer.properties | 277 - .../pdfjs/web/locale/nn-NO/viewer.properties | 277 - .../pdfjs/web/locale/oc/viewer.properties | 277 - .../pdfjs/web/locale/pa-IN/viewer.properties | 277 - .../pdfjs/web/locale/pl/viewer.properties | 277 - .../pdfjs/web/locale/pt-BR/viewer.properties | 277 - .../pdfjs/web/locale/pt-PT/viewer.properties | 277 - .../pdfjs/web/locale/rm/viewer.properties | 277 - .../pdfjs/web/locale/ro/viewer.properties | 241 - .../pdfjs/web/locale/ru/viewer.properties | 277 - .../pdfjs/web/locale/sat/viewer.properties | 198 - .../pdfjs/web/locale/sc/viewer.properties | 265 - .../pdfjs/web/locale/scn/viewer.properties | 101 - .../pdfjs/web/locale/sco/viewer.properties | 249 - .../pdfjs/web/locale/si/viewer.properties | 237 - .../pdfjs/web/locale/sk/viewer.properties | 277 - .../pdfjs/web/locale/skr/viewer.properties | 277 - .../pdfjs/web/locale/sl/viewer.properties | 277 - .../pdfjs/web/locale/son/viewer.properties | 173 - .../pdfjs/web/locale/sq/viewer.properties | 271 - .../pdfjs/web/locale/sr/viewer.properties | 277 - .../pdfjs/web/locale/sv-SE/viewer.properties | 277 - .../pdfjs/web/locale/szl/viewer.properties | 245 - .../pdfjs/web/locale/ta/viewer.properties | 194 - .../pdfjs/web/locale/te/viewer.properties | 237 - .../pdfjs/web/locale/tg/viewer.properties | 279 - .../pdfjs/web/locale/th/viewer.properties | 279 - .../pdfjs/web/locale/tl/viewer.properties | 243 - .../pdfjs/web/locale/tr/viewer.properties | 279 - .../pdfjs/web/locale/trs/viewer.properties | 206 - .../pdfjs/web/locale/uk/viewer.properties | 277 - .../pdfjs/web/locale/ur/viewer.properties | 239 - .../pdfjs/web/locale/uz/viewer.properties | 163 - .../pdfjs/web/locale/vi/viewer.properties | 277 - .../pdfjs/web/locale/wo/viewer.properties | 123 - .../pdfjs/web/locale/xh/viewer.properties | 177 - .../pdfjs/web/locale/zh-CN/viewer.properties | 279 - .../pdfjs/web/locale/zh-TW/viewer.properties | 277 - .../web/standard_fonts/FoxitDingbats.pfb | Bin 29513 -> 0 bytes .../pdfjs/web/standard_fonts/FoxitFixed.pfb | Bin 17597 -> 0 bytes .../web/standard_fonts/FoxitFixedBold.pfb | Bin 18055 -> 0 bytes .../standard_fonts/FoxitFixedBoldItalic.pfb | Bin 19151 -> 0 bytes .../web/standard_fonts/FoxitFixedItalic.pfb | Bin 
18746 -> 0 bytes .../pdfjs/web/standard_fonts/FoxitSans.pfb | Bin 15025 -> 0 bytes .../web/standard_fonts/FoxitSansBold.pfb | Bin 16344 -> 0 bytes .../standard_fonts/FoxitSansBoldItalic.pfb | Bin 16418 -> 0 bytes .../web/standard_fonts/FoxitSansItalic.pfb | Bin 16339 -> 0 bytes .../pdfjs/web/standard_fonts/FoxitSerif.pfb | Bin 19469 -> 0 bytes .../web/standard_fonts/FoxitSerifBold.pfb | Bin 19395 -> 0 bytes .../standard_fonts/FoxitSerifBoldItalic.pfb | Bin 20733 -> 0 bytes .../web/standard_fonts/FoxitSerifItalic.pfb | Bin 21227 -> 0 bytes .../pdfjs/web/standard_fonts/FoxitSymbol.pfb | Bin 16729 -> 0 bytes .../pdfjs/web/standard_fonts/LICENSE_FOXIT | 27 - .../web/standard_fonts/LICENSE_LIBERATION | 102 - .../standard_fonts/LiberationSans-Bold.ttf | Bin 137052 -> 0 bytes .../LiberationSans-BoldItalic.ttf | Bin 135124 -> 0 bytes .../standard_fonts/LiberationSans-Italic.ttf | Bin 162036 -> 0 bytes .../standard_fonts/LiberationSans-Regular.ttf | Bin 139512 -> 0 bytes .../portfolio/pdfjs/web/viewer.css | 2691 - .../portfolio/pdfjs/web/viewer.html | 16 - .../portfolio/pdfjs/web/viewer.js | 13898 ---- archive/portfolio-old/portfolio/projects.html | 11 - .../portfolio/resume-images/page-01-1200.webp | Bin 186402 -> 0 bytes .../resume-images/page-01-40-1200.webp | Bin 188 -> 0 bytes .../resume-images/page-01-40-480.webp | Bin 188 -> 0 bytes .../resume-images/page-01-40-800.webp | Bin 188 -> 0 bytes .../portfolio/resume-images/page-01-40.webp | Bin 154 -> 0 bytes .../portfolio/resume-images/page-01-480.webp | Bin 46436 -> 0 bytes .../portfolio/resume-images/page-01-800.webp | Bin 107442 -> 0 bytes .../portfolio/resume-images/page-01.png | Bin 279655 -> 0 bytes .../portfolio/resume-images/page-01.webp | Bin 190784 -> 0 bytes archive/portfolio-old/portfolio/robots.txt | 8 - archive/portfolio-old/portfolio/sitemap.xml | 8 - data/custom-llm/summary.json | 61 - data/custom-llm/train.jsonl | 327 - data/external-datasets/kalshi-seed.jsonl | 6 - .../live-market-delta-2026-03-10.jsonl | 12 - .../live-market-delta-2026-03-11.jsonl | 4 - .../xai-grok-image-capabilities.jsonl | 10 - .../xai-grok-trading-visual-prompts.jsonl | 10 - data/tradebot/eval-history.jsonl | 5 - data/tradebot/eval-responses.jsonl | 4 - data/tradebot/eval-score.json | 65 - data/tradebot/eval-suite.jsonl | 4 - data/tradebot/manifest.json | 59 - data/tradebot/train.chat.jsonl | 84 - data/tradebot/train.raw.jsonl | 84 - data/tradebot/validation.chat.jsonl | 21 - data/tradebot/validation.raw.jsonl | 21 - .../AI_TRAINING_POST_DEPLOY_RUNBOOK.md | 224 - deploy/namecheap/README.md | 69 - deploy/namecheap/SERVER_SETUP_COPYPASTA.sh | 189 - deploy/namecheap/bootstrap-server.sh | 66 - deploy/namecheap/deploy-remote.sh | 182 - deploy/namecheap/env.production.example | 29 - deploy/namecheap/nginx.tradehax.conf | 38 - deploy/namecheap/setup-cron.sh | 32 - docs/AI_DYNASTY_PHASE3_PHASE4_PLAN.md | 109 - docs/AI_HUB_MAKEOVER_PLAN.md | 252 - docs/COMPETITIVE_ROADMAP_TIER1_VISION.md | 374 - docs/HF_FINE_TUNING_WORKFLOW.md | 31 - docs/HIVEMIND_FOUNDATION.md | 61 - docs/IDE_PIPELINE_SYNC_WORKFLOW.md | 68 - docs/PERSONAL_TRADING_ASSISTANT_SETUP.md | 176 - docs/PRODUCTION_READINESS_ACTION_PLAN.md | 116 - index.html | 5 - k8s/ai-micro-deployment.yaml | 51 - k8s/ai-micro-hpa.yaml | 18 - k8s/deployment.yaml | 125 - k8s/ingress.yaml | 63 - k8s/nginx-ingress.yaml | 283 - main/.dockerignore | 70 - main/.env.production | 135 - main/.github/workflows/ci-cd.yml | 282 - .../workflows/deploy-multi-project.yml | 190 - main/.github/workflows/deploy-notify.yml | 13 - 
main/.github/workflows/deploy-production.yml | 178 - main/.github/workflows/social-media-sync.yml | 417 - main/.github/workflows/unified-mcp-deploy.yml | 277 - main/.gitignore | 81 - main/.gitlensrc.json | 23 - main/.gitlog_check.txt | 5 - main/.gitstatus_check.txt | 1 - main/.husky/pre-push | 18 - main/.oauth-disabled.json | 17 - main/.project-id | 4 - main/.renovaterc.json | 51 - main/404_DIAGNOSIS_ALTERNATIVE.md | 0 main/404_FIX_DEPLOYED.md | 194 - main/404_RESOLVED_FINAL.md | 0 main/AGENTS.md | 43 - main/AI_CENSORSHIP_REMOVED_GUITAR_PRICING.md | 191 - main/AI_FOUNDATION_ENHANCEMENT_STRATEGY.md | 563 - main/API_AUTONOMOUS_PUSH_COMPLETE.md | 324 - main/API_CONNECTIONS_INVENTORY.md | 407 - main/BACKUP_ENDPOINTS_CONFIGURED.md | 339 - main/BLANK_PAGE_FIXED.txt | 62 - main/CLEANUP_COMPLETE_VERIFICATION.md | 299 - main/CLEAN_REBUILD_COMPLETE.md | 0 main/CLEAN_WORKSPACE_SETUP.md | 352 - main/COMPLETE_CLEANUP_CHECKLIST.md | 336 - main/COMPLETE_DELIVERABLES.md | 386 - main/COMPLETE_FILE_REFERENCE.md | 359 - main/COMPLETE_SETUP_SUMMARY.md | 299 - main/COMPLETION_CERTIFICATE_RUNTIME_PATCH.md | 365 - main/COMPLETION_SUMMARY.md | 424 - main/DEEP_INSPECTION_REPORT.md | 271 - main/DELIVERABLES.md | 283 - main/DELIVERABLES_RUNTIME_VALIDATION.md | 344 - main/DEPLOY.bat | 152 - main/DEPLOYMENT.md | 317 - main/DEPLOYMENT_BLOCKED_BILLING.md | 191 - main/DEPLOYMENT_COMPLETE.md | 290 - main/DEPLOYMENT_COMPLETE_AI_UNLOCKED.txt | 83 - main/DEPLOYMENT_COMPLETE_BYPASS.md | 352 - .../DEPLOYMENT_COMPLETE_RUNTIME_VALIDATION.md | 321 - main/DEPLOYMENT_COMPLETE_SUCCESS.md | 294 - main/DEPLOYMENT_CONFIGURATION_COMPLETE.md | 408 - main/DEPLOYMENT_FINAL.md | 421 - main/DEPLOYMENT_FINAL_STATUS.md | 0 main/DEPLOYMENT_FINAL_SWEEP.md | 212 - main/DEPLOYMENT_LIVE_SUCCESS.md | 0 main/DEPLOYMENT_MISMATCH_REPORT.md | 199 - main/DEPLOYMENT_PHASE_COMPLETE.md | 265 - main/DEPLOYMENT_QUICKSTART.md | 398 - main/DEPLOYMENT_READY.md | 328 - main/DEPLOYMENT_READY_GO_LIVE.md | 217 - main/DEPLOYMENT_READY_STATUS.md | 340 - main/DEPLOYMENT_RUNTIME_VALIDATION_PATCH.md | 330 - main/DEPLOYMENT_STATUS.md | 316 - main/DEPLOYMENT_STATUS_FINAL.md | 265 - main/DEPLOYMENT_STATUS_GUITAR_PRICING.txt | 171 - main/DEPLOYMENT_SYNC_EXPLAINED.md | 530 - main/DEPLOYMENT_TRADEHAX_NET.md | 438 - main/DEPLOY_QUICK_REF.md | 198 - main/DISABLE_PASSWORD_PROTECTION.md | 140 - main/DOCKER_COMPOSE_STATUS.md | 93 - main/DOCUMENTATION_INDEX.md | 465 - main/DOCUMENTATION_INDEX_RUNTIME_PATCH.md | 312 - main/DOMAIN_PIPELINE_ARCHITECTURE.md | 232 - main/Dockerfile | 50 - main/Documentation/API_CONNECTION_GUIDE.md | 500 - .../COMPETITOR_DEEP_DIVE_2026.md | 93 - main/Documentation/DEPLOY_ONE_COMMAND.md | 78 - main/Documentation/HF_TOKEN_SETUP.md | 351 - main/Documentation/LEGAL_GUARDRAILS.md | 31 - main/Documentation/MOBILE_WEB_OPTIMIZATION.md | 449 - main/Documentation/OSS_COMPONENT_SHORTLIST.md | 68 - .../Documentation/SOCIAL_MEDIA_INTEGRATION.md | 491 - main/Documentation/SOCIAL_MEDIA_MCP_GUIDE.md | 657 - .../SOCIAL_MEDIA_MCP_IMPLEMENTATION.md | 524 - .../SOCIAL_MEDIA_QUICK_REFERENCE.md | 464 - main/ENDPOINT_FIX_INSTRUCTIONS.md | 144 - main/ENTERPRISE_DEVELOPMENT_STRATEGY.md | 447 - main/EXECUTION_CHECKLIST_ALL_TARGETS.md | 572 - main/EXECUTION_READY.md | 323 - main/EXECUTIVE_SUMMARY_PHASE_1.md | 355 - main/EXECUTIVE_SUMMARY_RUNTIME_PATCH.md | 230 - main/FILES_CREATED_SUMMARY.md | 296 - main/FINAL_CLEANUP_REPORT_2026.md | 253 - main/FINAL_DELIVERABLES_LIST.md | 475 - main/FINAL_DEPLOYMENT_CHECKLIST.md | 291 - main/FINAL_DEPLOYMENT_STEPS.md | 277 - 
main/FINAL_STATUS_GUITAR_PRICING_DEPLOYED.txt | 96 - main/GITHUB_REPOSITORY_GUIDE.md | 617 - main/GITLENS_SETUP_GUIDE.md | 101 - main/GIT_COMMIT_PAPER_TRADING.md | 122 - main/GPT_Trading_Assistant_API_Spec.md | 33 - ..._Trading_Assistant_Deployment_Checklist.md | 26 - ...ding_Assistant_Integration_Architecture.md | 43 - ...Trading_Assistant_Production_Validation.md | 26 - ...PT_Trading_Assistant_Signal_Alert_Logic.md | 23 - main/GPT_Trading_Assistant_UIUX_Wireframe.md | 31 - main/GROK_4_SETUP_COMPLETE.md | 391 - main/GROWTH_CAPABILITIES_ROADMAP.md | 595 - main/GUITAR_PRICING_CONFIG_CREATED.md | 97 - main/GUITAR_PRICING_UPDATE.md | 99 - main/GUITAR_PRICING_UPDATE_NEEDED.md | 217 - main/HF_TOKEN_SETUP_COMPLETE.md | 203 - main/HIGH_VALUE_TARGETS_IMPLEMENTATION.md | 421 - main/IDE_DEVELOPMENT_SETUP.md | 576 - main/IMPLEMENTATION_COMPLETE.md | 350 - ...LEMENTATION_COMPLETE_RUNTIME_VALIDATION.md | 370 - main/IMPLEMENTATION_QUICK_REF.md | 177 - main/IMPLEMENTATION_STATUS_TARGETS_1_2_3.md | 406 - main/IMPROVEMENTS_SUMMARY.md | 523 - main/INSPECTION_SUMMARY.md | 309 - main/INTELLIJ_IDEA_SETUP.md | 386 - main/JAVA_21_COMPLETION_RECORD.md | 436 - main/JAVA_21_FINAL_SUMMARY.txt | 344 - main/JAVA_21_LTS_UPGRADE.md | 361 - main/JAVA_21_QUICK_START.md | 142 - main/JAVA_21_UPGRADE_EXECUTION_SUMMARY.md | 379 - main/JAVA_21_UPGRADE_INDEX.md | 347 - main/JAVA_DEPLOYMENT_COMPLETE.md | 356 - main/JAVA_UPGRADE_STATUS.txt | 260 - main/LIVEPASS_CHECKLIST.md | 434 - main/LIVEPASS_DEPLOYMENT_REPORT.md | 579 - main/LIVEPASS_FINAL_REPORT.txt | Bin 8546 -> 0 bytes main/LIVEPASS_REPORT.txt | Bin 5326 -> 0 bytes main/MASTERS_SUBMISSION_PACKAGE.md | 267 - main/MCP_UNIFIED_ENVIRONMENT.md | 500 - main/MISSING_SECRETS_CHECKLIST.md | 93 - main/MULTI_PROJECT_DEPLOYMENT.md | 406 - main/MULTI_PROJECT_UPDATE.md | 261 - main/Makefile | 132 - main/NEURAL_ENGINE_DEPLOYMENT.md | 365 - main/NEURAL_ENGINE_FINAL_SUMMARY.md | 391 - main/NEURAL_ENGINE_INDEX.md | 442 - main/NEURAL_ENGINE_INTEGRATION_GUIDE.md | 577 - main/NEURAL_ENGINE_MANIFEST.md | 697 - main/NEURAL_ENGINE_README.md | 517 - main/NEURAL_HUB_DEPLOYED.txt | 283 - main/NEURAL_HUB_INTEGRATION_GUIDE.md | 347 - main/OPTIMIZATION_DEPLOYMENT_COMPLETE.md | 369 - main/PAPER_TRADING_COMPLETE.md | 282 - main/PAPER_TRADING_MODE.md | 236 - main/PAPER_TRADING_QUICKSTART.md | 298 - main/PAPER_TRADING_SUMMARY.md | 294 - main/PHASE_1_ARCHITECTURE.md | 635 - main/PHASE_1_COMPLETE_SUMMARY.md | 814 - main/PHASE_1_COMPLETION_SUMMARY.md | 470 - main/PHASE_1_DELIVERABLES_MANIFEST.md | 427 - main/PHASE_1_DEPLOYMENT_MANIFEST.md | 427 - main/PHASE_1_INDEX.md | 342 - main/PHASE_1_QUICK_START.md | 396 - main/PHASE_1_README.md | 397 - main/PHASE_1_STATUS_REPORT.md | 554 - main/POLYCLAW_INTEGRATION_SUMMARY.md | 26 - main/POLYMARKET_AI_SETUP.md | 159 - main/POLYMARKET_DEPLOYMENT_CHECKLIST.md | 458 - main/POLYMARKET_QUICK_REFERENCE.md | 336 - main/POLYMARKET_TRADING_ASSISTANT_GUIDE.md | 395 - ...MARKET_TRADING_ASSISTANT_IMPLEMENTATION.md | 397 - main/PRE_DEPLOYMENT_VERIFICATION_FINAL.md | 154 - main/PRODUCTION_COMPLETE.md | 350 - main/PRODUCTION_ENV_SETUP.md | 280 - main/PRODUCTION_FINAL_SUCCESS.txt | 125 - main/PRODUCTION_INDEX.md | 276 - main/PRODUCTION_READY.md | 333 - main/PRODUCTION_STATUS_FINAL.md | 272 - main/QUICK_ACTION_REQUIRED.txt | 70 - main/QUICK_AUTH_REFERENCE.txt | 48 - main/QUICK_FIX.txt | 51 - main/QUICK_REFERENCE.md | 249 - main/README.md | 363 - main/README_INDEX.md | 359 - main/README_POLYMARKET_INDEX.md | 492 - main/READY_TO_DEPLOY.md | 341 - main/READY_TO_DEPLOY.txt | 151 - 
main/REBUILD_STATUS.md | 30 - main/REMOVE_AUTHENTICATION_BARRIERS.md | 324 - main/REPOSITORY_CONSOLIDATION_PLAN.md | 213 - main/REPOSITORY_SYNC_REPORT.md | 437 - main/RUNTIME_ERROR_FIX.md | 177 - main/RUNTIME_FIX_STATUS.md | 155 - main/RUNTIME_VALIDATION_PATCH.md | 200 - main/SECURE_DEPLOYMENT_GUIDE.md | 296 - main/SECURITY_AUDIT.md | 256 - main/SECURITY_DEPLOYMENT_REPORT.md | 284 - main/START_HERE_SUPABASE.md | 143 - main/STREAMLINED_ARCHITECTURE_2026.md | 167 - main/SUCCESS_SUMMARY.txt | 166 - main/SUPABASE_CONNECTION_STATUS.md | 194 - main/SUPABASE_DEPLOYMENT_CHECKLIST.md | 384 - main/SUPABASE_DOCUMENTATION_INDEX.md | 380 - main/SUPABASE_INITIALIZATION_GUIDE.md | 360 - main/SUPABASE_QUICK_START.md | 424 - main/SUPABASE_STATUS_SUMMARY.md | 350 - main/SUPABASE_TROUBLESHOOTING.md | 396 - main/TRADEHAXAI_DOMAINS_FIXED.md | 173 - main/TRADEHAX_MERGE_COMPLETE.md | 202 - main/TRADEHAX_SYSTEM_OVERVIEW.md | 616 - main/TRADING_GATE_IMPLEMENTATION_COMPLETE.md | 305 - main/TWO_PROJECT_QUICK_START.md | 305 - main/VERCEL_REDIRECT_FIX.md | 161 - main/VITE_REACT_PLUGIN_FIX.md | 259 - main/WHATS_NEW.md | 328 - main/XAI_GROK_SUPABASE_COMPLETION.txt | 295 - main/api/account.ts | 23 - main/api/neural-hub.ts | 18 - main/automate-bypass.ps1 | 115 - main/automate-bypass.sh | 66 - main/automation/ENDPOINT_SYNC_EXTENSION.md | 21 - main/automation/README.md | 46 - main/automation/sync-all.ps1 | 99 - main/automation/sync-all.sh | 65 - main/automation/sync-config.json | 15 - main/clear-oauth.bat | 23 - main/deploy.ps1 | 234 - main/deploy.sh | 207 - main/disable-vercel-protection.ps1 | 33 - main/docker-compose.prod.yml | 125 - main/docker-compose.social.yml | 260 - main/docker-compose.staging.yml | 100 - main/docker-compose.yml | 96 - main/gitstatus.out | 0 main/k8s-app.yaml | 103 - main/k8s-configmap.yaml | 10 - main/k8s-namespace.yaml | 6 - main/k8s-postgres.yaml | 90 - main/k8s-redis.yaml | 77 - main/k8s-secret.yaml | 10 - main/lib/trading/neural-hub-pipeline.ts | 429 - main/lib/trading/technical-indicators.ts | 74 - .../2026-03-19/21-38-08/cache-stats.txt | 4 - .../2026-03-19/21-38-08/deleted-files.txt | 3 - .../21-38-08/object-id-map.old-new.txt | 147 - .../21-38-08/protected-dirt/466ea925-HEAD.csv | 1 - main/main-mirror/HEAD | 1 - main/main-mirror/config | 11 - main/main-mirror/description | 1 - main/main-mirror/hooks/applypatch-msg.sample | 15 - main/main-mirror/hooks/commit-msg.sample | 24 - .../hooks/fsmonitor-watchman.sample | 174 - main/main-mirror/hooks/post-update.sample | 8 - main/main-mirror/hooks/pre-applypatch.sample | 14 - main/main-mirror/hooks/pre-commit.sample | 49 - .../main-mirror/hooks/pre-merge-commit.sample | 13 - main/main-mirror/hooks/pre-push.sample | 53 - main/main-mirror/hooks/pre-rebase.sample | 169 - main/main-mirror/hooks/pre-receive.sample | 24 - .../hooks/prepare-commit-msg.sample | 42 - .../main-mirror/hooks/push-to-checkout.sample | 78 - .../hooks/sendemail-validate.sample | 77 - main/main-mirror/hooks/update.sample | 128 - main/main-mirror/info/exclude | 6 - main/main-mirror/info/refs | 280 - .../00/3eecba8ff3cb69cec471960a10a407a1271439 | Bin 780 -> 0 bytes .../06/5b6fa7208f2c77f6fe35f70a5e6795f2c09a41 | Bin 4211 -> 0 bytes .../06/808593691c4e6f425f26a3d2cc35f991e0408a | Bin 9019 -> 0 bytes .../09/62e7adc7d67fb3b72b907bc1e27e908ad8b009 | Bin 216 -> 0 bytes .../0b/38d6ccfa259181678733b97e5e885600b6e25c | Bin 781 -> 0 bytes .../0c/f70da50da63b394bb68a6bfe350e815d7ace7e | Bin 4270 -> 0 bytes .../12/03415b1c8bbd1aa11f51046e5b2691253b6033 | Bin 781 -> 0 bytes 
.../12/4be7b93a635139aa7597f4fb65be3bb4a58366 | Bin 781 -> 0 bytes .../19/222792d099e51926233b92a84267cdb867cadb | 2 - .../19/94a60e797f0219eafb1d4e16b0075df6494a6c | Bin 9304 -> 0 bytes .../19/a9cade88b929bab94a8ade3e06af00c486ed1b | 1 - .../1b/ca9c58e1d92d7092a35cd40966e23a73224b8f | Bin 780 -> 0 bytes .../21/8b166608b167a399b9d6311727062fd0d1344d | 1 - .../23/e8d5920cd3a487c51f8c8652fc24b96a1c0d3b | Bin 780 -> 0 bytes .../27/096b987130fb4d3bff5184362171f8c1d24d5d | 1 - .../27/3e2cee3a6aa512dcb247001bbd6d3bc69343dd | 3 - .../28/b4f23c6746499faf8d7e66a56bcee46764950a | Bin 232 -> 0 bytes .../28/bb6c1bcc4928db6efc9d1026a9d3d0b24883d3 | Bin 4247 -> 0 bytes .../29/bab6382b23aca9e06ec4f8325cc425551bf35b | Bin 4248 -> 0 bytes .../2c/06f0c79b62cd712b19a9b9bdca61bdc947b56e | 1 - .../2c/3f6a543c6f82ec50b815f34e5d292ec1a783bb | Bin 4248 -> 0 bytes .../2e/8863610da03beb038ce8581ed1f9651d871b55 | Bin 4272 -> 0 bytes .../2e/940f5859cb3adf79524a10218088046e7e8d4a | Bin 9284 -> 0 bytes .../2f/90f7cb20a4fe018900497119dc86aead028fe5 | Bin 780 -> 0 bytes .../36/b915345c2653533d70a22763dd22a530d74209 | Bin 9177 -> 0 bytes .../38/5b2c962d58b8788bdc1698b98be23410fbec16 | 1 - .../39/9b9dbb75db9f32c6381eeace15c027fb6b8758 | Bin 228 -> 0 bytes .../3a/2e7677566e90a9771a0e08c3ad1e32ac16009c | Bin 9017 -> 0 bytes .../3a/8741a733ad1b69cb2b77bcdb8edc1afc9f5285 | Bin 9018 -> 0 bytes .../3b/a5510bd1494f65a4fc8e4ef82ed9f9841885fd | 2 - .../3e/7ea060464862001c2808ee601a7c6c8f903e58 | 5 - .../3e/d6cdfa19b07f1eaf179c236b3ffe84950494c6 | Bin 4270 -> 0 bytes .../42/2e1b180b9809d8d9baa5306f94b0a93aa51933 | Bin 780 -> 0 bytes .../42/ccf4926295f112fc18960da28ea3f109b70fba | Bin 9303 -> 0 bytes .../43/3e2e332ae566a1e7a0baa6e1eec59786038a21 | Bin 452 -> 0 bytes .../44/a2257afab779ac41596c1768da4c88ccb5af0c | Bin 4247 -> 0 bytes .../45/46d9f642c0f75c8d1f3ed4327bc101a349c0ef | 1 - .../45/daf039fd6440f2969f6b5dc1addbdafb82782e | 2 - .../48/170dffff1d58d92283118f177d471de81d74cc | Bin 9303 -> 0 bytes .../49/6fed147f4e19d615cdd5ae456393914c7e3e71 | Bin 780 -> 0 bytes .../4a/2a9798896bc55f4d16361c1a8cc8e7660345b0 | Bin 226 -> 0 bytes .../4b/7b5e8b5cd925b529e33b3ddcf70ae957f3a56e | Bin 4270 -> 0 bytes .../4c/01af4e73de4f97d5793f8dd0e428d73b071e5c | Bin 9091 -> 0 bytes .../4e/bfb772e61c7ac99cc21470248851dd55bfd33c | Bin 4271 -> 0 bytes .../4e/ee1afc4dd779f51b2ccacef0fd716a20137ef4 | Bin 4270 -> 0 bytes .../51/58ef8528a012765cc812cce933a30bd9d17e66 | Bin 780 -> 0 bytes .../51/9434a699d13cd1d455bfaed056d254e5c93128 | Bin 245 -> 0 bytes .../54/12f18a192aca7ff86ed3a10bdb05f683d23439 | Bin 779 -> 0 bytes .../54/cc041820b8160747ba1955d65b8575f6ba0242 | Bin 780 -> 0 bytes .../60/a60701bdc664a912abeea00c7420564ac1bfa4 | Bin 4211 -> 0 bytes .../63/1965896effa264513d93e433c7dbd32c96cb3c | 2 - .../63/7c707e57a7536cd743ff845c6411cdd9775fc3 | Bin 214 -> 0 bytes .../65/7830fcc116ebf7c791f0992ed36c76d151a2c4 | Bin 9091 -> 0 bytes .../67/d7962b39755f8530ac20612d8d605d74416d60 | Bin 4212 -> 0 bytes .../6c/c16a42bfb6b6aac5d2b12d9e2d373f78fecedf | Bin 779 -> 0 bytes .../6c/d06796134219596ebf80de6fac6e085aca0599 | Bin 9178 -> 0 bytes .../6d/c886d0d29116cabfdeedab7ef52a1c6a76a88b | Bin 4248 -> 0 bytes .../6d/f9a6b57a70fc22ad75091207865679f39d2c23 | Bin 4248 -> 0 bytes .../6e/92517c3e37899b772f127525744dd51de74ad5 | Bin 9302 -> 0 bytes .../6f/0a452e45f13914696bc9ecb3efdd0f1c846303 | Bin 266 -> 0 bytes .../6f/3384dfb18e81383e04053beac22e50113db83c | Bin 240 -> 0 bytes .../70/af36f87703221d24d5cbd007c162344a812817 | 1 - 
.../70/eebc9e37e5ac4f7bb7266fecb5837afc217f2b | 2 - .../74/130995f1cd9003495bf089340119f0433363f0 | 1 - .../77/9b0a8f939e221604aeda5662575909710755e1 | 3 - .../77/b18e309b7d739eb3fd0362e6f57503bc6ef6da | Bin 4270 -> 0 bytes .../7a/ff9b237757f5e37e707f2146d7084b2d10e872 | Bin 9019 -> 0 bytes .../7c/25397ed2ce79d357f944a3279379f5e370b0d1 | Bin 4271 -> 0 bytes .../7c/28fcf90cfa4aaea467989c3e0288eea9376fa0 | Bin 780 -> 0 bytes .../7f/e0ca6e0bae63b68ba4b7eb16046b1d1027526b | 2 - .../80/f7f5746535147ec95d230e594938c263d1a8ec | Bin 4246 -> 0 bytes .../82/cc7c74282761a4c4d12055418c847166340fa8 | Bin 9176 -> 0 bytes .../85/b7c9507cdef9680e3cbe5c7d7362effbb1fb8b | Bin 4270 -> 0 bytes .../86/39ca18629b33cea057c325c25dcfb38ddf8d0b | Bin 780 -> 0 bytes .../8a/44c269bc6b183f0aa2bccfcdedbb6b98be02bd | Bin 780 -> 0 bytes .../8a/ecc18ca80a0c65daf19584c07ae230316a72c4 | 3 - .../8d/5ce7bb872a545c7887551b392762eb12330f9e | Bin 9285 -> 0 bytes .../92/c91329412402c8f2d9e7b08d075c68f34d4614 | Bin 4270 -> 0 bytes .../95/00320bbd629425b4c0867cac3e1a937d761606 | Bin 9303 -> 0 bytes .../97/340fe497fe4ae28e292455b30632002f8d6e15 | Bin 9018 -> 0 bytes .../98/8f0a4219ad5735b93ef3cdee20c0cb27ce7133 | 2 - .../99/86ac0b011ec560596928d735a9582192f10a41 | Bin 9018 -> 0 bytes .../99/af85d49323d6409911a8935b434be4f060888d | Bin 9264 -> 0 bytes .../99/ef15fa0abf5ceeb9fa9d1a3419b554f67034d3 | Bin 4247 -> 0 bytes .../9a/03be0853620564b5758af9a4589b0a74c26420 | Bin 9018 -> 0 bytes .../9b/29e11770c8bb24d26b12b143ae7764d17ae935 | Bin 4248 -> 0 bytes .../9e/4f506d6380f1e2429af74a0a941464e1448093 | Bin 9017 -> 0 bytes .../a0/be556c4288cd6f67280031a2db89b42a749fc4 | Bin 9019 -> 0 bytes .../a3/157042b291a3c3f4c7722cb8755f01530af649 | Bin 780 -> 0 bytes .../a3/8e203859492b0709bc5cf67e1619cc4f82847c | Bin 4250 -> 0 bytes .../a4/fbee9f24d85e1da2b3e55de7bef22f766fd7ee | Bin 192 -> 0 bytes .../a5/b50d6c1eb9ebbee7dccea0464c3b7246a62f95 | 1 - .../a7/0db4601cd1ec93ea8247ce56d66ec21fd4e714 | Bin 9019 -> 0 bytes .../a7/1dcdbc520e2c9079ad2bb720e07f77b5e94901 | Bin 4271 -> 0 bytes .../a7/69f992e33271546971b923e93edea5686292a2 | Bin 237 -> 0 bytes .../a9/b1bf9e099efdfb0dc4315e402b623b090ef24c | Bin 780 -> 0 bytes .../ab/266486ddacfcf9696a2e9d12c237bc0a207fa5 | Bin 780 -> 0 bytes .../ad/04a318c83e50de24a8e0d23c5a00c965a46aaa | Bin 9302 -> 0 bytes .../ae/8254018caefc48b99f096d6d5519e8f4532c7e | Bin 779 -> 0 bytes .../ae/ed71f2a2d6965f5bad744a9494cf6c5a755a31 | Bin 4271 -> 0 bytes .../b0/e0409e479cc13360fcc161c7a7216e874d1d00 | Bin 4248 -> 0 bytes .../b1/e363ffe0ea7dde37cfa08faef4adb7854a13ed | Bin 9177 -> 0 bytes .../b3/2c6178326823982caf921ab99943cca38f02a2 | Bin 4271 -> 0 bytes .../b3/fc8539054f2a35c50b907bd38cde2d8d41c1c0 | Bin 232 -> 0 bytes .../b5/2b0f767431cfcd161dbd0e0acaa3f2c2f0c472 | 1 - .../b5/5db5fcf98d023acfcddaf2c3f86adadcb9bf50 | Bin 781 -> 0 bytes .../b6/75c02205643d1afb7e46bf97b96905fa13034e | Bin 9178 -> 0 bytes .../b7/0539ad0335fc4a36e339bb2ce06e7249dfabc8 | Bin 244 -> 0 bytes .../bb/ee7ec051ae591f967867750be61c7fa30fb2be | Bin 216 -> 0 bytes .../bc/b94978e069cd16103462d1e297e90e00be4f5c | Bin 4270 -> 0 bytes .../bc/cf790361bc9507009678cc42a69f79687394bb | Bin 4272 -> 0 bytes .../be/2708f4bd765f5542ba57799507a5e4867df6e3 | Bin 229 -> 0 bytes .../bf/fc491da862bb98cc42b5f709933a9095186089 | 2 - .../c0/f72b2942a3f9bd3f642951bfac0ba433451a3e | Bin 780 -> 0 bytes .../c2/293d1069bbf0d97063785c656dfac5b6f2d827 | Bin 4270 -> 0 bytes .../c2/76155bcfde4958f5ce902cbb92aa548a33fba4 | Bin 780 -> 0 bytes 
.../c4/c659bac10b109e9f1067744d86c1e70013167f | Bin 9178 -> 0 bytes .../c6/cfb72e9ff06729f3f8df7dc6493fd37a523d15 | Bin 280 -> 0 bytes .../c9/1ba877ce1ce741c005953ac8ed521e440d5858 | Bin 9090 -> 0 bytes .../c9/d518bdaacae31432ccc9cae4022e0e869b8951 | Bin 4247 -> 0 bytes .../ca/616ee242f33aee743b7a8c69a78c1f32dfb2cb | Bin 4270 -> 0 bytes .../ca/fafe74d169561f79030a95fd1d4bcf4f68a23d | Bin 781 -> 0 bytes .../cc/7834430c00243f6036de34c8e0e0fc1af882cd | 2 - .../d0/da88de2951629ed5e2c4c2658e8ecdfbfa2a39 | 1 - .../d3/86bbc22f3214ec99edf80c7c9034f934bc89bc | Bin 9303 -> 0 bytes .../d8/7659dc0ff4e4c7b921e32491e4aa5024f34323 | Bin 9018 -> 0 bytes .../d9/4286cd163d8be8d454e5dc3ad64e3f607a2b2e | Bin 9177 -> 0 bytes .../df/f9ad7b5416f26639d84dcab693dad958142998 | Bin 4270 -> 0 bytes .../df/fc07ab175f88cedcadacb17634ff79ba033e67 | Bin 234 -> 0 bytes .../e4/e63b8660ddb43f67d7df6445e8445afa9d4bef | Bin 780 -> 0 bytes .../e6/283afde6bbc68e915e4f811c890ef5f55871c2 | Bin 781 -> 0 bytes .../e6/5f2b4417d4b09c69b4561f737d6a0a29d2be7f | Bin 4269 -> 0 bytes .../e6/b4d8d2ce4ea22f041921acc8983190fa707dc7 | Bin 781 -> 0 bytes .../ea/6e63924817991b082b10790f0b5dff2ba73c15 | Bin 212 -> 0 bytes .../eb/6800e85b8bc6ab026772529c6eef099206408e | Bin 4270 -> 0 bytes .../ec/cf0e699c14ba04aaab09a95281ac108feef59b | Bin 4271 -> 0 bytes .../ed/a89e31c99389393d1385be79e1cf1b5fa9e597 | Bin 9286 -> 0 bytes .../ef/c256f6bdf83b4fa29efb555f9f8d75d0179468 | Bin 9178 -> 0 bytes .../f4/75ce01c41e6d93c6a9fd31e18b8b8e01869207 | Bin 4249 -> 0 bytes .../f5/0314085d75e02979e2ee804d5e8579f896025a | Bin 202 -> 0 bytes .../f5/f244f3785aa683bccf28cc00be28a86890af71 | Bin 4270 -> 0 bytes .../f7/13fc08933ff89d4a12370187085128f9aebcac | Bin 9069 -> 0 bytes .../fb/bce8db47a1571734321dc7eddd644326c37ef0 | Bin 4249 -> 0 bytes .../fd/41b194d469c2fae5324b91a26a6303e9ce7d95 | 3 - main/main-mirror/objects/info/commit-graph | Bin 107732 -> 0 bytes main/main-mirror/objects/info/packs | 2 - ...67431a565d80139e551925012f85b278dbd.bitmap | Bin 60536 -> 0 bytes ...ffd67431a565d80139e551925012f85b278dbd.idx | Bin 292328 -> 0 bytes ...fd67431a565d80139e551925012f85b278dbd.pack | Bin 26343825 -> 0 bytes ...ffd67431a565d80139e551925012f85b278dbd.rev | Bin 41660 -> 0 bytes main/main-mirror/packed-refs | 96 - main/models.yml | 32 - main/models/neuralHubConfig.ts | 18 - main/models/tradingModels.ts | 20 - main/nginx.conf | 113 - main/open-tradehax.ps1 | 111 - main/scripts/api-connection-manager.js | 422 - main/scripts/auto-push-commit-deploy.sh | 18 - main/scripts/autonomous-push-orchestrator.js | 449 - main/scripts/check-dns.sh | 15 - main/scripts/clear-oauth-permanent.js | 478 - main/scripts/deploy-tradehax.ps1 | 215 - main/scripts/deploy-tradehax.sh | 20 - main/scripts/deploy.bat | 61 - main/scripts/deploy.sh | 156 - main/scripts/endpoint-health-check.js | 79 - main/scripts/env-autofill.js | 49 - main/scripts/full-deploy-check.sh | 13 - main/scripts/full-health-check.ps1 | 52 - main/scripts/full-health-check.sh | 39 - main/scripts/livepass-validation.ps1 | 228 - main/scripts/mcp-orchestrator.js | 520 - main/scripts/namecheap-dns-copypasta.bat | 49 - main/scripts/namecheap-dns-copypasta.ps1 | 167 - main/scripts/namecheap-dns-copypasta.sh | 145 - main/scripts/nmap-check.sh | 6 - main/scripts/predeploy-check.js | 30 - main/scripts/setup-gitlens.js | 480 - main/scripts/social-mcp-servers.js | 918 - main/scripts/social-sync-orchestrator.js | 431 - main/scripts/supabase-migrate-ci.sh | 20 - main/scripts/supabase-migrate.ps1 | 18 - main/scripts/supabase-migrate.sh | 21 - 
main/scripts/sync-env-to-vercel.js | 33 - .../trading-gate-integration-check.mjs | 164 - main/scripts/trading-gate-smoke-test.mjs | 256 - main/scripts/unified-mcp-push.js | 253 - main/scripts/validate-runtime-patch.mjs | 129 - main/scripts/verify-hf-token.js | 110 - main/scripts/webhook-handler.js | 36 - main/services/dataService.ts | 47 - main/services/llmService.ts | 43 - main/services/neuralBotService.ts | 79 - main/setup-neural-engine.ps1 | 188 - main/setup-neural-engine.sh | 176 - main/supabase_schema.sql | 157 - main/sync-env-to-vercel.ps1 | 26 - main/test-openai-api.ps1 | 94 - main/test-openai-api.sh | 59 - main/tmp_alias_ascii.txt | 22 - main/tmp_alias_backup_before_fix.txt | 22 - main/tmp_alias_by_org.txt | 7 - main/tmp_alias_check_after_set.txt | Bin 5748 -> 0 bytes main/tmp_alias_current.txt | Bin 5742 -> 0 bytes main/tmp_alias_default_scope.txt | 10 - main/tmp_alias_hackai_after_deploy.txt | Bin 872 -> 0 bytes main/tmp_alias_hackavelliz.txt | 8 - main/tmp_alias_live_check.txt | Bin 5748 -> 0 bytes main/tmp_alias_ls_hackavelliz_latest.txt | Bin 5490 -> 0 bytes main/tmp_alias_owner7282.txt | Bin 936 -> 0 bytes main/tmp_alias_set_tradehax.txt | 7 - main/tmp_alias_set_tradehax_fix.txt | Bin 1174 -> 0 bytes main/tmp_alias_set_tradehax_now.txt | Bin 1170 -> 0 bytes main/tmp_alias_set_www.txt | 7 - main/tmp_alias_set_www_now.txt | Bin 1194 -> 0 bytes main/tmp_alias_set_www_tradehax_fix.txt | Bin 1198 -> 0 bytes main/tmp_alias_tradehax_fresh.txt | Bin 1174 -> 0 bytes main/tmp_alias_tradehax_net.txt | 7 - main/tmp_alias_tradehax_recovery.txt | Bin 1174 -> 0 bytes main/tmp_alias_tradehaxai_me_fresh.txt | Bin 1180 -> 0 bytes main/tmp_alias_tradehaxai_me_recovery.txt | Bin 1180 -> 0 bytes main/tmp_alias_tradehaxai_tech_fresh.txt | Bin 1192 -> 0 bytes main/tmp_alias_tradehaxai_tech_recovery.txt | Bin 1192 -> 0 bytes main/tmp_alias_www_tradehax_fresh.txt | Bin 1198 -> 0 bytes main/tmp_alias_www_tradehax_net.txt | 7 - main/tmp_alias_www_tradehax_recovery.txt | Bin 1198 -> 0 bytes main/tmp_alias_www_tradehaxai_me_fresh.txt | Bin 1204 -> 0 bytes main/tmp_alias_www_tradehaxai_me_recovery.txt | Bin 1204 -> 0 bytes main/tmp_alias_www_tradehaxai_tech_fresh.txt | Bin 1216 -> 0 bytes ...tmp_alias_www_tradehaxai_tech_recovery.txt | Bin 1216 -> 0 bytes main/tmp_apex_body_ascii.html | 14 - main/tmp_apex_final_check.txt | 9 - main/tmp_apex_headers_ascii.txt | 32 - main/tmp_branch_after_push.txt | 8 - main/tmp_branch_latest2.txt | 8 - main/tmp_branch_ship.txt | 8 - main/tmp_build_after_fix.txt | Bin 4790 -> 0 bytes main/tmp_build_after_rewrites.txt | Bin 2134 -> 0 bytes main/tmp_build_check.txt | Bin 3296 -> 0 bytes main/tmp_build_exit.txt | Bin 3142 -> 0 bytes main/tmp_build_exit2.txt | Bin 3142 -> 0 bytes main/tmp_build_final2.txt | Bin 3208 -> 0 bytes main/tmp_build_now.txt | Bin 2106 -> 0 bytes main/tmp_build_now_status.txt | Bin 10 -> 0 bytes main/tmp_build_output.txt | 20 - main/tmp_build_release.txt | Bin 2134 -> 0 bytes main/tmp_build_release_status.txt | Bin 10 -> 0 bytes main/tmp_build_status.txt | Bin 10 -> 0 bytes main/tmp_commit_ship_result.txt | 21 - main/tmp_curl_test.txt | Bin 2234 -> 0 bytes main/tmp_deploy_after_rewrites.txt | Bin 4706 -> 0 bytes main/tmp_deploy_compare.txt | 4 - main/tmp_deploy_compare_bypass.txt | 4 - main/tmp_deploy_fix.txt | Bin 4814 -> 0 bytes main/tmp_deploy_latest.txt | Bin 4660 -> 0 bytes main/tmp_deploy_ship_result.txt | 0 main/tmp_deploy_ship_result2.txt | Bin 738 -> 0 bytes main/tmp_deploy_status.txt | 1 - main/tmp_deploy_tradehax_scope_hackai.txt | Bin 1100 
-> 0 bytes ..._deploy_tradehax_scope_hackai_afterfix.txt | 0 main/tmp_deployment_check.txt | 25 - main/tmp_dist_check.txt | 6 - main/tmp_dns_guide_after_patch.txt | Bin 1094 -> 0 bytes main/tmp_dns_verify_after_patch.txt | Bin 360 -> 0 bytes main/tmp_dns_verify_after_patch2.txt | Bin 360 -> 0 bytes main/tmp_domain_inspect_tradehax_net.txt | Bin 1780 -> 0 bytes main/tmp_domain_inspect_tradehaxai_me.txt | Bin 1786 -> 0 bytes main/tmp_domain_inspect_tradehaxai_tech.txt | Bin 1798 -> 0 bytes main/tmp_domain_inspect_www_tradehax_net.txt | Bin 1796 -> 0 bytes main/tmp_domain_inspect_www_tradehaxai_me.txt | Bin 1802 -> 0 bytes ...tmp_domain_inspect_www_tradehaxai_tech.txt | Bin 1814 -> 0 bytes main/tmp_endpoint_check.txt | 4 - main/tmp_endpoint_diagnosis.json | 38 - main/tmp_endpoint_headers.txt | 24 - main/tmp_endpoint_matrix.txt | 6 - main/tmp_endpoint_matrix_after_fix.txt | 6 - main/tmp_endpoint_test_308.txt | Bin 930 -> 0 bytes main/tmp_endpoint_titles.txt | 6 - main/tmp_final_page_check.html | Bin 834 -> 0 bytes main/tmp_fix_build.txt | Bin 3312 -> 0 bytes main/tmp_fix_status.txt | Bin 10 -> 0 bytes main/tmp_fresh_deploy_body.html | Bin 30720 -> 0 bytes main/tmp_fresh_deploy_capture.txt | Bin 3766 -> 0 bytes main/tmp_git_commit.txt | 72 - main/tmp_git_commit_redirect_fix.txt | 17 - main/tmp_git_push.txt | 8 - main/tmp_git_status.txt | 15 - main/tmp_git_status_check2.txt | Bin 1494 -> 0 bytes main/tmp_health_current.txt | 1 - main/tmp_health_file_check.txt | 0 main/tmp_live_asset_body.txt | Bin 414190 -> 0 bytes main/tmp_live_asset_headers.txt | Bin 3840 -> 0 bytes main/tmp_log_after_push.txt | 3 - main/tmp_log_latest2.txt | 2 - main/tmp_ls_default_after_unlink.txt | Bin 790 -> 0 bytes main/tmp_ls_default_scope.txt | 8 - main/tmp_ls_hackai_after_deploy.txt | Bin 790 -> 0 bytes main/tmp_ls_hackavelliz.txt | 8 - main/tmp_ls_owner7282.txt | Bin 824 -> 0 bytes main/tmp_manual_vercel_prod_deploy.txt | Bin 1382 -> 0 bytes main/tmp_namecheap_commit_a0548da.txt | 2 - main/tmp_namecheap_commit_ed451f2.txt | 2 - main/tmp_namecheap_last_commit.txt | Bin 206 -> 0 bytes main/tmp_neural_deploy.txt | Bin 4518 -> 0 bytes main/tmp_npm_list.txt | 7 - main/tmp_pro_build.txt | Bin 2134 -> 0 bytes main/tmp_pro_build_status.txt | Bin 10 -> 0 bytes main/tmp_pro_deploy.txt | Bin 4978 -> 0 bytes main/tmp_projects_by_org.txt | 7 - main/tmp_projects_default_after_unlink.txt | Bin 816 -> 0 bytes main/tmp_projects_ls.txt | 8 - main/tmp_projects_owner7282.txt | Bin 884 -> 0 bytes main/tmp_push_ship_result.txt | 0 main/tmp_rebuild_output.txt | Bin 2134 -> 0 bytes main/tmp_recovery_build.txt | Bin 2104 -> 0 bytes main/tmp_recovery_build_status.txt | Bin 10 -> 0 bytes main/tmp_recovery_deploy.txt | Bin 4992 -> 0 bytes main/tmp_redirect_chain_health.txt | 59 - main/tmp_redirect_chain_tradehax.txt | 59 - main/tmp_release_build.txt | Bin 2134 -> 0 bytes main/tmp_release_build_status.txt | Bin 10 -> 0 bytes main/tmp_release_check.txt | 29 - main/tmp_release_deploy.txt | Bin 5416 -> 0 bytes main/tmp_remote_ship.txt | 6 - main/tmp_security_build.txt | Bin 2134 -> 0 bytes main/tmp_security_build_status.txt | Bin 10 -> 0 bytes main/tmp_security_deploy.txt | Bin 5136 -> 0 bytes main/tmp_smoke_test_output.txt | Bin 192 -> 0 bytes main/tmp_status_ship.txt | 17 - main/tmp_teams.txt | 12 - main/tmp_teams_full.txt | 12 - main/tmp_tradehax_blank_test.html | Bin 1666 -> 0 bytes main/tmp_tradehax_http.txt | Bin 2322 -> 0 bytes main/tmp_tradehax_post_rollback.txt | Bin 2234 -> 0 bytes main/tmp_tradehaxai_me_test.txt | Bin 5430 -> 0 bytes 
main/tmp_tradehaxai_tech_test.txt | Bin 5412 -> 0 bytes main/tmp_vercel_alias_hackai.txt | Bin 872 -> 0 bytes main/tmp_vercel_alias_hackavelliz.txt | Bin 876 -> 0 bytes main/tmp_vercel_deploy_final.txt | Bin 1216 -> 0 bytes main/tmp_vercel_deploy_force.txt | Bin 4508 -> 0 bytes main/tmp_vercel_deploy_result.txt | Bin 4256 -> 0 bytes main/tmp_vercel_deploy_result2.txt | Bin 4482 -> 0 bytes main/tmp_vercel_deployments_default.txt | Bin 1060 -> 0 bytes main/tmp_vercel_dir_listing.txt | 7 - main/tmp_vercel_env_list.txt | Bin 1366 -> 0 bytes main/tmp_vercel_link_now.txt | Bin 1306 -> 0 bytes main/tmp_vercel_ls_current.txt | 8 - main/tmp_vercel_projects_default.txt | Bin 816 -> 0 bytes ...mp_vercel_projects_hackai_after_unlink.txt | Bin 816 -> 0 bytes main/tmp_vercel_teams.txt | Bin 1068 -> 0 bytes main/tmp_vercel_token_state.txt | 1 - main/tmp_vercel_version_check.txt | Bin 710 -> 0 bytes main/tmp_vercel_whoami.txt | Bin 24 -> 0 bytes main/tmp_vercel_whoami_after_unlink.txt | Bin 24 -> 0 bytes main/tmp_vercel_whoami_now.txt | Bin 50 -> 0 bytes main/tmp_vercel_whoami_ship.txt | Bin 960 -> 0 bytes main/tmp_verify_build.txt | Bin 3142 -> 0 bytes main/tmp_verify_build_status.txt | Bin 10 -> 0 bytes main/tmp_web_build_output.txt | Bin 2134 -> 0 bytes main/tmp_web_dir_force.txt | 20 - main/tmp_white_build_check.txt | Bin 2106 -> 0 bytes main/tmp_whoami.txt | 1 - main/tmp_whoami_after_logout.txt | 8 - main/tmp_www_after_fresh_body.html | Bin 2490 -> 0 bytes main/tmp_www_after_fresh_headers.txt | Bin 3766 -> 0 bytes main/tmp_www_endpoint_check.txt | 4 - main/tmp_www_headers_ascii.txt | 22 - main/tmp_www_health_final_check.txt | 26 - main/tmp_www_recovery_asset_headers.txt | Bin 3860 -> 0 bytes main/tmp_www_recovery_body.html | Bin 2488 -> 0 bytes main/tmp_www_recovery_headers.txt | Bin 3748 -> 0 bytes main/tmp_www_root_headers_body.txt | 31 - main/tmp_www_tradehax_post_rollback.txt | Bin 5436 -> 0 bytes main/tmp_www_tradehax_test.html | Bin 834 -> 0 bytes main/tmp_xai_commit.txt | 1 - main/tmp_xai_commit_exit.txt | 1 - main/vendor/massive-client-js | 1 - main/vercel-deploy-webhook.json | 6 - main/verify-deployment.ps1 | 107 - main/verify-tradehax-deployment.sh | 180 - qodana.yaml | 10 - skills-lock.json | 10 - sync-log.txt | Bin 1232 -> 0 bytes tools/generate_placeholders.py | 59 - tools/generate_responsive.py | 66 - tools/png_to_webp.py | 31 - tradehax-cleanup.sh | 257 - tradehax-crypto-education.jsonl | 10 - tradehax-domain-priority.jsonl | 28 - tradehax-repo/DEPLOYMENT_SUMMARY.md | 357 - tradehax-repo/WINDOWS_SETUP.md | 246 - .../fine-tune-requirements-windows.txt | 16 - .../scripts/install-hf-deps-windows.ps1 | 33 - tradehax-training-expanded.jsonl | 10 - tradehaxai.code-workspace | 42 - 1569 files changed, 266030 deletions(-) delete mode 100644 .agents/skills/supabase-postgres-best-practices/AGENTS.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/CLAUDE.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/README.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/SKILL.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/_contributing.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/_sections.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/_template.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md delete mode 100644 
.agents/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/conn-limits.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/conn-pooling.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/data-batch-inserts.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/data-n-plus-one.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/data-pagination.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/data-upsert.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/lock-advisory.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/lock-short-transactions.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/lock-skip-locked.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/query-composite-indexes.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/query-covering-indexes.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/query-index-types.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/query-missing-indexes.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/query-partial-indexes.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/schema-constraints.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/schema-data-types.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/schema-partitioning.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/schema-primary-keys.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/security-privileges.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/security-rls-basics.md delete mode 100644 .agents/skills/supabase-postgres-best-practices/references/security-rls-performance.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/AGENTS.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/CLAUDE.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/README.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/SKILL.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/_contributing.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/_sections.md delete mode 100644 
.augment/skills/supabase-postgres-best-practices/references/_template.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/conn-limits.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/conn-pooling.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/data-batch-inserts.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/data-n-plus-one.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/data-pagination.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/data-upsert.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/lock-advisory.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/lock-short-transactions.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/lock-skip-locked.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/query-composite-indexes.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/query-covering-indexes.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/query-index-types.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/query-missing-indexes.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/query-partial-indexes.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/schema-constraints.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/schema-data-types.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/schema-partitioning.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/schema-primary-keys.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/security-privileges.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/security-rls-basics.md delete mode 100644 .augment/skills/supabase-postgres-best-practices/references/security-rls-performance.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/AGENTS.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/CLAUDE.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/README.md delete mode 100644 
.claude/skills/supabase-postgres-best-practices/SKILL.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/_contributing.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/_sections.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/_template.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/conn-limits.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/conn-pooling.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/data-batch-inserts.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/data-n-plus-one.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/data-pagination.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/data-upsert.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/lock-advisory.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/lock-short-transactions.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/lock-skip-locked.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/query-composite-indexes.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/query-covering-indexes.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/query-index-types.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/query-missing-indexes.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/query-partial-indexes.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/schema-constraints.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/schema-data-types.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/schema-partitioning.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/schema-primary-keys.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/security-privileges.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/security-rls-basics.md delete mode 100644 .claude/skills/supabase-postgres-best-practices/references/security-rls-performance.md delete mode 100644 
.continue/skills/supabase-postgres-best-practices/AGENTS.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/CLAUDE.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/README.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/SKILL.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/_contributing.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/_sections.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/_template.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/conn-limits.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/conn-pooling.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/data-batch-inserts.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/data-n-plus-one.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/data-pagination.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/data-upsert.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/lock-advisory.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/lock-short-transactions.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/lock-skip-locked.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/query-composite-indexes.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/query-covering-indexes.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/query-index-types.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/query-missing-indexes.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/query-partial-indexes.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/schema-constraints.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/schema-data-types.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/schema-partitioning.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/schema-primary-keys.md delete mode 100644 
.continue/skills/supabase-postgres-best-practices/references/security-privileges.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/security-rls-basics.md delete mode 100644 .continue/skills/supabase-postgres-best-practices/references/security-rls-performance.md delete mode 100644 .env.vercel.production.template delete mode 100644 .github/workflows/aggressive-proof-gate.yml delete mode 100644 .github/workflows/ai-micro-ci.yml delete mode 100644 .github/workflows/ethicalcheck.yml delete mode 100644 .github/workflows/github-pages.yml delete mode 100644 .github/workflows/hivemind-quality-gate.yml delete mode 100644 .github/workflows/install-hooks-test.yml delete mode 100644 .github/workflows/intelligence-ingest-quality.yml delete mode 100644 .github/workflows/lighthouse-ci.yml delete mode 100644 .github/workflows/live-delta-dataset-refresh.yml delete mode 100644 .github/workflows/nextjs.yml.disabled delete mode 100644 .github/workflows/qodana_code_quality.yml delete mode 100644 .github/workflows/readiness-gate.yml delete mode 100644 .gitlab-ci.yml delete mode 100644 .htaccess delete mode 100644 .idea/.gitignore delete mode 100644 .idea/IntelliLang.xml delete mode 100644 .idea/go.imports.xml delete mode 100644 .idea/misc.xml delete mode 100644 .idea/modules.xml delete mode 100644 .idea/tradehax.iml delete mode 100644 .idea/vcs.xml delete mode 100644 .junie/skills/supabase-postgres-best-practices/AGENTS.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/CLAUDE.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/README.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/SKILL.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/_contributing.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/_sections.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/_template.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/conn-limits.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/conn-pooling.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/data-batch-inserts.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/data-n-plus-one.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/data-pagination.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/data-upsert.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/lock-advisory.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/lock-short-transactions.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/lock-skip-locked.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md 
delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/query-composite-indexes.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/query-covering-indexes.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/query-index-types.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/query-missing-indexes.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/query-partial-indexes.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/schema-constraints.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/schema-data-types.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/schema-partitioning.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/schema-primary-keys.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/security-privileges.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/security-rls-basics.md delete mode 100644 .junie/skills/supabase-postgres-best-practices/references/security-rls-performance.md delete mode 100644 .lighthouserc.json delete mode 100644 .vscode-extension/package.json delete mode 100644 .vscode-extension/src/extension.ts delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/AGENTS.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/CLAUDE.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/README.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/SKILL.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/_contributing.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/_sections.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/_template.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/conn-limits.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/conn-pooling.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/data-batch-inserts.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/data-n-plus-one.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/data-pagination.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/data-upsert.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/lock-advisory.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md delete mode 100644 
.windsurf/skills/supabase-postgres-best-practices/references/lock-short-transactions.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/lock-skip-locked.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/query-composite-indexes.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/query-covering-indexes.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/query-index-types.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/query-missing-indexes.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/query-partial-indexes.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/schema-constraints.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/schema-data-types.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/schema-partitioning.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/schema-primary-keys.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/security-privileges.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/security-rls-basics.md delete mode 100644 .windsurf/skills/supabase-postgres-best-practices/references/security-rls-performance.md delete mode 100644 .zencoder/rules/repo.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/AGENTS.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/CLAUDE.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/README.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/SKILL.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/_contributing.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/_sections.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/_template.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/conn-limits.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/conn-pooling.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/data-batch-inserts.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/data-n-plus-one.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/data-pagination.md delete mode 
100644 .zencoder/skills/supabase-postgres-best-practices/references/data-upsert.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/lock-advisory.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/lock-short-transactions.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/lock-skip-locked.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/query-composite-indexes.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/query-covering-indexes.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/query-index-types.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/query-missing-indexes.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/query-partial-indexes.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/schema-constraints.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/schema-data-types.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/schema-partitioning.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/schema-primary-keys.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/security-privileges.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/security-rls-basics.md delete mode 100644 .zencoder/skills/supabase-postgres-best-practices/references/security-rls-performance.md delete mode 100644 .zenflow/settings.json delete mode 160000 1/tradehaxai-assistant delete mode 100644 90_DAY_EXECUTION_PLAN.md delete mode 100644 AGENTS.md delete mode 100644 AI_ENVIRONMENT_STANDARDS.md delete mode 100644 AI_ENVIRONMENT_TEMPLATE.env delete mode 100644 AI_LIVE_ENV_BLUEPRINT.env delete mode 100644 AI_NAVIGATOR_IMPLEMENTATION_PLAN.md delete mode 100644 AI_SETUP_SUMMARY.md delete mode 100644 API_DOCUMENTATION.md delete mode 100644 BLOG_PAGE_VERIFICATION.md delete mode 100644 BUILD_COMPLETE.md delete mode 100644 CLEANUP_SUMMARY.md delete mode 100644 COMPLETE_AUTOMATION_GUIDE.md delete mode 100644 COMPLETE_DEPLOYMENT_GUIDE.md delete mode 100644 CUSTOM_LLM_MODEL_PLAN.md delete mode 100644 DEPLOYMENT_CHECKLIST.md delete mode 100644 DEPLOYMENT_FINAL_SUMMARY.md delete mode 100644 DEPLOYMENT_FIX_CHECKLIST.md delete mode 100644 DEPLOYMENT_FIX_SUMMARY.md delete mode 100644 DEPLOYMENT_PATHS.md delete mode 100644 DEPLOYMENT_QUICKSTART.md delete mode 100644 DEPLOYMENT_READY.txt delete mode 100644 DIGITAL_EMPIRE_STRATEGY.md delete mode 100644 DISCORD_APP_SETUP.md delete mode 100644 DNS_COMPARISON_TABLE.md delete mode 100644 DNS_CONFIGURATION_SUMMARY.md delete mode 100644 DNS_INDEX.md delete mode 100644 DNS_INSPECTION_REPORT.md delete mode 100644 
DNS_QUICK_FIX.md delete mode 100644 DOCS_INDEX.md delete mode 100644 EXECUTION_SUMMARY.md delete mode 100644 FINAL_STATUS_REPORT.md delete mode 100644 FINTECH_PAYMENT_RAILS_SETUP.md delete mode 100644 GITHUB_SECRETS_SETUP.md delete mode 100644 GITHUB_SYNC_COMPLETE.md delete mode 100644 GITLAB_AGENT_DEPLOYMENT.md delete mode 100644 HANDOFF_BUNDLE.md delete mode 100644 HARD_LAUNCH_RUNBOOK.md delete mode 100644 HF_DATASET_UPLOAD.md delete mode 100644 HF_FINE_TUNING_WORKFLOW.md delete mode 100644 HF_INTEGRATION_GUIDE.md delete mode 100644 HF_SETUP_GUIDE.md delete mode 100644 IDE_AUTOMATION_WORKFLOW.md delete mode 100644 IDE_PIPELINE_READY.md delete mode 100644 IDE_PIPELINE_WORKFLOW.md delete mode 100644 INTEGRATION_GUIDE.md delete mode 100644 INTELLIGENCE_BUILD_LOG.md delete mode 100644 KUBERNETES_DEPLOYMENT_STATUS.md delete mode 100644 KUBERNETES_READY.md delete mode 100644 LOCAL_REPO_WORKFLOW.md delete mode 100644 MONETIZATION_GUIDE.md delete mode 100644 NAMECHEAP_CPANEL_DEPLOYMENT.md delete mode 100644 NAMECHEAP_MIGRATION_CHECKLIST.md delete mode 100644 PERMISSIVE_CONFIG.md delete mode 100644 PIPELINE_QUICKSTART.md delete mode 100644 QUICK_START.md delete mode 100644 QUICK_VISUAL_FIX.md delete mode 100644 SETUP_VERIFICATION.md delete mode 100644 TESTING_GUIDE.md delete mode 100644 TRADEBOT_TRAINING_PIPELINE.md delete mode 100644 TRADEHAX_AI_PLATFORM_SUMMARY.md delete mode 100644 TRANSFORMATION_COMPLETE.md delete mode 100644 Untitled-1.yml delete mode 100644 VERCEL_BRANCH_FIX.md delete mode 100644 VERCEL_DEPLOYMENT_TROUBLESHOOTING.md delete mode 100644 VERCEL_DIAGNOSIS.md delete mode 100644 VERCEL_DOMAIN_SETUP.md delete mode 100644 VERCEL_STATIC_EXPORT_FIX.md delete mode 100644 _safe_backup/NeuralHub.jsx delete mode 100644 _safe_backup/massive-storage.js delete mode 100644 _safe_backup/package.json delete mode 100644 ai-micro/Dockerfile delete mode 100644 ai-micro/package.json delete mode 100644 ai-micro/src/index.ts delete mode 100644 ai-micro/tsconfig.json delete mode 100644 ai-training-set.jsonl delete mode 100644 ai/server/index.ts delete mode 100644 ai/server/model.ts delete mode 100644 ai/server/security-middleware.ts delete mode 100644 archive/docs/AI_LLM_INTEGRATION.md delete mode 100644 archive/docs/AI_PROMPTS.md delete mode 100644 archive/docs/AUDIO_DEPLOYMENT_GUIDE.md delete mode 100644 archive/docs/CLEANUP_COMPLETE.md delete mode 100644 archive/docs/COMPLETION_SUMMARY.md delete mode 100644 archive/docs/CRYPTO_PAGE_SECURITY_AUDIT.md delete mode 100644 archive/docs/CURRENT_STATUS.md delete mode 100644 archive/docs/DELIVERABLES_SUMMARY.md delete mode 100644 archive/docs/DEPLOYMENT.md delete mode 100644 archive/docs/DEPLOYMENT_CHECKLIST.md delete mode 100644 archive/docs/DEPLOYMENT_COMPLETE.md delete mode 100644 archive/docs/DEPLOYMENT_FLOW_DIAGRAM.md delete mode 100644 archive/docs/DEPLOYMENT_GUIDE.md delete mode 100644 archive/docs/DEPLOYMENT_IMPLEMENTATION_SUMMARY.md delete mode 100644 archive/docs/DEPLOYMENT_QUICK_REF.md delete mode 100644 archive/docs/DEPLOYMENT_SUMMARY.md delete mode 100644 archive/docs/DEPLOYMENT_SYNC_GUIDE.md delete mode 100644 archive/docs/DEPLOYMENT_SYNC_IMPLEMENTATION.md delete mode 100644 archive/docs/DNS_SETUP_INSTRUCTIONS.md delete mode 100644 archive/docs/DOMAIN_SETUP.md delete mode 100644 archive/docs/DOMAIN_SETUP_GUIDE.md delete mode 100644 archive/docs/EMULATOR_README.md delete mode 100644 archive/docs/FINAL_SUMMARY.md delete mode 100644 archive/docs/HYPERBOREA_PHASE3_LAUNCH.md delete mode 100644 archive/docs/IMPLEMENTATION_COMPLETE.md delete mode 
100644 archive/docs/IMPLEMENTATION_SUMMARY_OLD.md delete mode 100644 archive/docs/INDEX.md delete mode 100644 archive/docs/INTEGRATION_SUMMARY.md delete mode 100644 archive/docs/ISSUES_AND_RESOLUTIONS.md delete mode 100644 archive/docs/ISSUE_51_IMPLEMENTATION_SUMMARY.md delete mode 100644 archive/docs/LAUNCH_CHECKLIST.md delete mode 100644 archive/docs/MIGRATION_SUMMARY.md delete mode 100644 archive/docs/MONETIZATION_SETUP.md delete mode 100644 archive/docs/NFT_IMPLEMENTATION_SUMMARY.md delete mode 100644 archive/docs/NFT_MINT_GUIDE.md delete mode 100644 archive/docs/PAYPAL_SETUP_GUIDE.md delete mode 100644 archive/docs/PHASE_2_COMPLETION_REPORT.md delete mode 100644 archive/docs/PRODUCTION_DEPLOYMENT_SUMMARY.md delete mode 100644 archive/docs/PROJECT_STRUCTURE.md delete mode 100644 archive/docs/QUICK_API_REFERENCE.md delete mode 100644 archive/docs/QUICK_DEPLOY.md delete mode 100644 archive/docs/QUICK_DEPLOY_CHECKLIST.md delete mode 100644 archive/docs/README.md delete mode 100644 archive/docs/README_PRODUCTION.md delete mode 100644 archive/docs/REBUILD_COMPLETE.md delete mode 100644 archive/docs/REBUILD_SUMMARY.md delete mode 100644 archive/docs/ROM_LIBRARY.md delete mode 100644 archive/docs/ROM_MANIFEST.md delete mode 100644 archive/docs/SECURITY_AUDIT_REPORT.md delete mode 100644 archive/docs/SECURITY_AUDIT_REPORT_2025.md delete mode 100644 archive/docs/SECURITY_FIX.md delete mode 100644 archive/docs/SECURITY_HARDENING.md delete mode 100644 archive/docs/SETUP_COMPLETE.md delete mode 100644 archive/docs/SETUP_INSTRUCTIONS_FOR_OWNER.md delete mode 100644 archive/docs/SHAMROCK_SETUP.md delete mode 100644 archive/docs/TASK_SYSTEM_README.md delete mode 100644 archive/docs/TODO.md delete mode 100644 archive/docs/VERCEL_ANALYTICS.md delete mode 100644 archive/docs/VERCEL_API_SETUP.md delete mode 100644 archive/docs/VERCEL_DEPLOYMENT_CHECKLIST.md delete mode 100644 archive/docs/VERCEL_DNS_SETUP.md delete mode 100644 archive/legacy-code/.azure/plan.copilotmd delete mode 100644 archive/legacy-code/.zencoder/rules/repo.md delete mode 100644 archive/legacy-code/Anchor.toml delete mode 100644 archive/legacy-code/Makefile delete mode 100644 archive/legacy-code/README.md delete mode 100644 archive/legacy-code/clover-exchange.js delete mode 100644 archive/legacy-code/clover-goals.js delete mode 100644 archive/legacy-code/config.js delete mode 100644 archive/legacy-code/deploy-mainnet.sh delete mode 100644 archive/legacy-code/legacy-games/README.md delete mode 100644 archive/legacy-code/legacy-games/convert.html delete mode 100644 archive/legacy-code/legacy-games/emulator.html delete mode 100644 archive/legacy-code/legacy-games/featured-roms.html delete mode 100644 archive/legacy-code/legacy-games/games-index.html delete mode 100644 archive/legacy-code/legacy-games/games.html delete mode 100644 archive/legacy-code/legacy-games/goals-dashboard.html delete mode 100644 archive/legacy-code/legacy-games/hub.html delete mode 100644 archive/legacy-code/legacy-games/hyperborea.html delete mode 100644 archive/legacy-code/legacy-games/index.html delete mode 100644 archive/legacy-code/legacy-games/mario-auto.html delete mode 100644 archive/legacy-code/legacy-games/mario-debug.html delete mode 100644 archive/legacy-code/legacy-games/mario-simple.html delete mode 100644 archive/legacy-code/legacy-games/mario-test.html delete mode 100644 archive/legacy-code/legacy-games/mario.html delete mode 100644 archive/legacy-code/legacy-games/memory.html delete mode 100644 archive/legacy-code/legacy-games/play.html delete mode 
100644 archive/legacy-code/legacy-games/quick-play-new.html delete mode 100644 archive/legacy-code/legacy-games/quick-play.html delete mode 100644 archive/legacy-code/legacy-games/retro.html delete mode 100644 archive/legacy-code/legacy-games/snake-demo.html delete mode 100644 archive/legacy-code/legacy-games/snake.html delete mode 100644 archive/legacy-code/legacy-games/spades.html delete mode 100644 archive/legacy-code/legacy-games/test-live.html delete mode 100644 archive/legacy-code/legacy-games/tetris.html delete mode 100644 archive/legacy-code/legacy-games/topup.html delete mode 100644 archive/legacy-code/legacy-games/zelda-simple.html delete mode 100644 archive/legacy-code/legacy-games/zelda-test.html delete mode 100644 archive/legacy-code/legacy-games/zelda.html delete mode 100644 archive/legacy-code/main.js delete mode 100644 archive/legacy-code/play-timer-integration.js delete mode 100644 archive/legacy-code/play-timer.js delete mode 100644 archive/legacy-code/program/.gitignore delete mode 100644 archive/legacy-code/program/.prettierignore delete mode 100644 archive/legacy-code/program/Anchor.toml delete mode 100644 archive/legacy-code/program/Cargo.lock delete mode 100644 archive/legacy-code/program/Cargo.toml delete mode 100644 archive/legacy-code/program/README.md delete mode 100644 archive/legacy-code/program/migrations/deploy.ts delete mode 100644 archive/legacy-code/program/package.json delete mode 100644 archive/legacy-code/program/programs/counter/Cargo.toml delete mode 100644 archive/legacy-code/program/programs/counter/Xargo.toml delete mode 100644 archive/legacy-code/program/programs/counter/src/lib.rs delete mode 100644 archive/legacy-code/program/tests/counter.ts delete mode 100644 archive/legacy-code/program/tsconfig.json delete mode 100644 archive/legacy-code/schema.json delete mode 100644 archive/legacy-code/server.js delete mode 100644 archive/legacy-code/spades-engine.js delete mode 100644 archive/legacy-code/spades-game.js delete mode 100644 archive/legacy-code/test-critical-path.js delete mode 100644 archive/legacy-code/test-endpoints.mjs delete mode 100644 archive/legacy-code/test-thorough.js delete mode 100644 archive/legacy-code/update-backend-config.js delete mode 100644 archive/legacy-code/web3-rewards.js delete mode 100644 archive/legacy-code/wrangler.toml delete mode 100644 archive/portfolio-old/portfolio/404.html delete mode 100644 archive/portfolio-old/portfolio/MichaelSFlahertyResume.pdf delete mode 100644 archive/portfolio-old/portfolio/about.html delete mode 100644 archive/portfolio-old/portfolio/assets/logo.svg delete mode 100644 archive/portfolio-old/portfolio/assets/style.css delete mode 100644 archive/portfolio-old/portfolio/blog/index.html delete mode 100644 archive/portfolio-old/portfolio/index.html delete mode 100644 archive/portfolio-old/portfolio/pdfjs/build/pdf.js delete mode 100644 archive/portfolio-old/portfolio/pdfjs/build/pdf.sandbox.js delete mode 100644 archive/portfolio-old/portfolio/pdfjs/build/pdf.worker.js delete mode 100644 archive/portfolio-old/portfolio/pdfjs/viewer.html delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/78-EUC-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/78-EUC-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/78-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/78-RKSJ-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/78-RKSJ-V.bcmap delete mode 100644 
archive/portfolio-old/portfolio/pdfjs/web/cmaps/78-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/78ms-RKSJ-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/78ms-RKSJ-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/83pv-RKSJ-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/90ms-RKSJ-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/90ms-RKSJ-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/90msp-RKSJ-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/90msp-RKSJ-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/90pv-RKSJ-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/90pv-RKSJ-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Add-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Add-RKSJ-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Add-RKSJ-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Add-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-CNS1-0.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-CNS1-1.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-CNS1-2.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-CNS1-3.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-CNS1-4.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-CNS1-5.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-CNS1-6.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-CNS1-UCS2.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-GB1-0.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-GB1-1.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-GB1-2.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-GB1-3.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-GB1-4.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-GB1-5.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-GB1-UCS2.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-Japan1-0.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-Japan1-1.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-Japan1-2.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-Japan1-3.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-Japan1-4.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-Japan1-5.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-Japan1-6.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-Japan1-UCS2.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-Korea1-0.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-Korea1-1.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-Korea1-2.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Adobe-Korea1-UCS2.bcmap delete mode 100644 
archive/portfolio-old/portfolio/pdfjs/web/cmaps/B5-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/B5-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/B5pc-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/B5pc-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/CNS-EUC-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/CNS-EUC-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/CNS1-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/CNS1-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/CNS2-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/CNS2-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/ETHK-B5-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/ETHK-B5-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/ETen-B5-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/ETen-B5-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/ETenms-B5-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/ETenms-B5-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/EUC-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/EUC-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Ext-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Ext-RKSJ-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Ext-RKSJ-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Ext-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GB-EUC-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GB-EUC-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GB-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GB-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GBK-EUC-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GBK-EUC-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GBK2K-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GBK2K-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GBKp-EUC-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GBKp-EUC-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GBT-EUC-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GBT-EUC-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GBT-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GBT-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GBTpc-EUC-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GBTpc-EUC-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GBpc-EUC-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/GBpc-EUC-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/HKdla-B5-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/HKdla-B5-V.bcmap delete mode 100644 
archive/portfolio-old/portfolio/pdfjs/web/cmaps/HKdlb-B5-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/HKdlb-B5-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/HKgccs-B5-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/HKgccs-B5-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/HKm314-B5-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/HKm314-B5-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/HKm471-B5-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/HKm471-B5-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/HKscs-B5-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/HKscs-B5-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Hankaku.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Hiragana.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/KSC-EUC-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/KSC-EUC-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/KSC-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/KSC-Johab-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/KSC-Johab-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/KSC-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/KSCms-UHC-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/KSCms-UHC-HW-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/KSCms-UHC-HW-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/KSCms-UHC-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/KSCpc-EUC-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/KSCpc-EUC-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Katakana.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/LICENSE delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/NWP-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/NWP-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/RKSJ-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/RKSJ-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/Roman.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniCNS-UCS2-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniCNS-UCS2-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniCNS-UTF16-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniCNS-UTF16-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniCNS-UTF32-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniCNS-UTF32-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniCNS-UTF8-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniCNS-UTF8-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniGB-UCS2-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniGB-UCS2-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniGB-UTF16-H.bcmap delete mode 100644 
archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniGB-UTF16-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniGB-UTF32-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniGB-UTF32-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniGB-UTF8-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniGB-UTF8-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS-UCS2-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS-UCS2-HW-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS-UCS2-HW-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS-UCS2-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS-UTF16-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS-UTF16-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS-UTF32-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS-UTF32-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS-UTF8-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS-UTF8-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS2004-UTF16-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS2004-UTF16-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS2004-UTF32-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS2004-UTF32-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS2004-UTF8-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJIS2004-UTF8-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJISPro-UCS2-HW-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJISPro-UCS2-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJISPro-UTF8-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJISX0213-UTF32-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJISX0213-UTF32-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJISX02132004-UTF32-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniJISX02132004-UTF32-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniKS-UCS2-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniKS-UCS2-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniKS-UTF16-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniKS-UTF16-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniKS-UTF32-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniKS-UTF32-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniKS-UTF8-H.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/UniKS-UTF8-V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/V.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/cmaps/WP-Symbol.bcmap delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/annotation-check.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/annotation-comment.svg delete mode 100644 
archive/portfolio-old/portfolio/pdfjs/web/images/annotation-help.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/annotation-insert.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/annotation-key.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/annotation-newparagraph.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/annotation-noicon.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/annotation-note.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/annotation-paperclip.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/annotation-paragraph.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/annotation-pushpin.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/cursor-editorFreeText.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/cursor-editorInk.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/findbarButton-next.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/findbarButton-previous.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/loading-dark.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/loading-icon.gif delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/loading.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/secondaryToolbarButton-documentProperties.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/secondaryToolbarButton-firstPage.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/secondaryToolbarButton-handTool.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/secondaryToolbarButton-lastPage.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/secondaryToolbarButton-rotateCcw.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/secondaryToolbarButton-rotateCw.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/secondaryToolbarButton-scrollHorizontal.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/secondaryToolbarButton-scrollPage.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/secondaryToolbarButton-scrollVertical.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/secondaryToolbarButton-scrollWrapped.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/secondaryToolbarButton-selectTool.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/secondaryToolbarButton-spreadEven.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/secondaryToolbarButton-spreadNone.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/secondaryToolbarButton-spreadOdd.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-bookmark.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-currentOutlineItem.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-download.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-editorFreeText.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-editorInk.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-menuArrow.svg delete mode 100644 
archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-openFile.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-pageDown.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-pageUp.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-presentationMode.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-print.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-search.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-secondaryToolbarToggle.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-sidebarToggle.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-viewAttachments.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-viewLayers.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-viewOutline.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-viewThumbnail.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-zoomIn.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/toolbarButton-zoomOut.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/treeitem-collapsed.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/images/treeitem-expanded.svg delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ach/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/af/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/an/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ar/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ast/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/az/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/be/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/bg/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/bn/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/bo/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/br/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/brx/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/bs/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ca/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/cak/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ckb/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/cs/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/cy/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/da/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/de/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/dsb/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/el/viewer.properties delete mode 100644 
archive/portfolio-old/portfolio/pdfjs/web/locale/en-CA/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/en-GB/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/en-US/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/eo/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/es-AR/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/es-CL/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/es-ES/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/es-MX/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/et/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/eu/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/fa/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ff/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/fi/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/fr/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/fur/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/fy-NL/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ga-IE/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/gd/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/gl/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/gn/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/gu-IN/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/he/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/hi-IN/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/hr/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/hsb/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/hu/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/hy-AM/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/hye/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ia/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/id/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/is/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/it/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ja/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ka/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/kab/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/kk/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/km/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/kn/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ko/viewer.properties delete mode 100644 
archive/portfolio-old/portfolio/pdfjs/web/locale/lij/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/lo/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/locale.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/lt/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ltg/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/lv/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/meh/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/mk/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/mr/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ms/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/my/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/nb-NO/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ne-NP/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/nl/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/nn-NO/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/oc/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/pa-IN/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/pl/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/pt-BR/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/pt-PT/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/rm/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ro/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ru/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/sat/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/sc/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/scn/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/sco/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/si/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/sk/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/skr/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/sl/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/son/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/sq/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/sr/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/sv-SE/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/szl/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ta/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/te/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/tg/viewer.properties delete mode 100644 
archive/portfolio-old/portfolio/pdfjs/web/locale/th/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/tl/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/tr/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/trs/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/uk/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/ur/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/uz/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/vi/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/wo/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/xh/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/zh-CN/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/locale/zh-TW/viewer.properties delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/FoxitDingbats.pfb delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/FoxitFixed.pfb delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/FoxitFixedBold.pfb delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/FoxitFixedBoldItalic.pfb delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/FoxitFixedItalic.pfb delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/FoxitSans.pfb delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/FoxitSansBold.pfb delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/FoxitSansBoldItalic.pfb delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/FoxitSansItalic.pfb delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/FoxitSerif.pfb delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/FoxitSerifBold.pfb delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/FoxitSerifBoldItalic.pfb delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/FoxitSerifItalic.pfb delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/FoxitSymbol.pfb delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/LICENSE_FOXIT delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/LICENSE_LIBERATION delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/LiberationSans-Bold.ttf delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/LiberationSans-BoldItalic.ttf delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/LiberationSans-Italic.ttf delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/standard_fonts/LiberationSans-Regular.ttf delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/viewer.css delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/viewer.html delete mode 100644 archive/portfolio-old/portfolio/pdfjs/web/viewer.js delete mode 100644 archive/portfolio-old/portfolio/projects.html delete mode 100644 archive/portfolio-old/portfolio/resume-images/page-01-1200.webp delete mode 100644 archive/portfolio-old/portfolio/resume-images/page-01-40-1200.webp delete mode 100644 archive/portfolio-old/portfolio/resume-images/page-01-40-480.webp delete mode 100644 
archive/portfolio-old/portfolio/resume-images/page-01-40-800.webp delete mode 100644 archive/portfolio-old/portfolio/resume-images/page-01-40.webp delete mode 100644 archive/portfolio-old/portfolio/resume-images/page-01-480.webp delete mode 100644 archive/portfolio-old/portfolio/resume-images/page-01-800.webp delete mode 100644 archive/portfolio-old/portfolio/resume-images/page-01.png delete mode 100644 archive/portfolio-old/portfolio/resume-images/page-01.webp delete mode 100644 archive/portfolio-old/portfolio/robots.txt delete mode 100644 archive/portfolio-old/portfolio/sitemap.xml delete mode 100644 data/custom-llm/summary.json delete mode 100644 data/custom-llm/train.jsonl delete mode 100644 data/external-datasets/kalshi-seed.jsonl delete mode 100644 data/external-datasets/live-market-delta-2026-03-10.jsonl delete mode 100644 data/external-datasets/live-market-delta-2026-03-11.jsonl delete mode 100644 data/external-datasets/xai-grok-image-capabilities.jsonl delete mode 100644 data/external-datasets/xai-grok-trading-visual-prompts.jsonl delete mode 100644 data/tradebot/eval-history.jsonl delete mode 100644 data/tradebot/eval-responses.jsonl delete mode 100644 data/tradebot/eval-score.json delete mode 100644 data/tradebot/eval-suite.jsonl delete mode 100644 data/tradebot/manifest.json delete mode 100644 data/tradebot/train.chat.jsonl delete mode 100644 data/tradebot/train.raw.jsonl delete mode 100644 data/tradebot/validation.chat.jsonl delete mode 100644 data/tradebot/validation.raw.jsonl delete mode 100644 db/supabase/AI_TRAINING_POST_DEPLOY_RUNBOOK.md delete mode 100644 deploy/namecheap/README.md delete mode 100644 deploy/namecheap/SERVER_SETUP_COPYPASTA.sh delete mode 100644 deploy/namecheap/bootstrap-server.sh delete mode 100644 deploy/namecheap/deploy-remote.sh delete mode 100644 deploy/namecheap/env.production.example delete mode 100644 deploy/namecheap/nginx.tradehax.conf delete mode 100644 deploy/namecheap/setup-cron.sh delete mode 100644 docs/AI_DYNASTY_PHASE3_PHASE4_PLAN.md delete mode 100644 docs/AI_HUB_MAKEOVER_PLAN.md delete mode 100644 docs/COMPETITIVE_ROADMAP_TIER1_VISION.md delete mode 100644 docs/HF_FINE_TUNING_WORKFLOW.md delete mode 100644 docs/HIVEMIND_FOUNDATION.md delete mode 100644 docs/IDE_PIPELINE_SYNC_WORKFLOW.md delete mode 100644 docs/PERSONAL_TRADING_ASSISTANT_SETUP.md delete mode 100644 docs/PRODUCTION_READINESS_ACTION_PLAN.md delete mode 100644 index.html delete mode 100644 k8s/ai-micro-deployment.yaml delete mode 100644 k8s/ai-micro-hpa.yaml delete mode 100644 k8s/deployment.yaml delete mode 100644 k8s/ingress.yaml delete mode 100644 k8s/nginx-ingress.yaml delete mode 100644 main/.dockerignore delete mode 100644 main/.env.production delete mode 100644 main/.github/workflows/ci-cd.yml delete mode 100644 main/.github/workflows/deploy-multi-project.yml delete mode 100644 main/.github/workflows/deploy-notify.yml delete mode 100644 main/.github/workflows/deploy-production.yml delete mode 100644 main/.github/workflows/social-media-sync.yml delete mode 100644 main/.github/workflows/unified-mcp-deploy.yml delete mode 100644 main/.gitignore delete mode 100644 main/.gitlensrc.json delete mode 100644 main/.gitlog_check.txt delete mode 100644 main/.gitstatus_check.txt delete mode 100644 main/.husky/pre-push delete mode 100644 main/.oauth-disabled.json delete mode 100644 main/.project-id delete mode 100644 main/.renovaterc.json delete mode 100644 main/404_DIAGNOSIS_ALTERNATIVE.md delete mode 100644 main/404_FIX_DEPLOYED.md delete mode 100644 main/404_RESOLVED_FINAL.md 
delete mode 100644 main/AGENTS.md delete mode 100644 main/AI_CENSORSHIP_REMOVED_GUITAR_PRICING.md delete mode 100644 main/AI_FOUNDATION_ENHANCEMENT_STRATEGY.md delete mode 100644 main/API_AUTONOMOUS_PUSH_COMPLETE.md delete mode 100644 main/API_CONNECTIONS_INVENTORY.md delete mode 100644 main/BACKUP_ENDPOINTS_CONFIGURED.md delete mode 100644 main/BLANK_PAGE_FIXED.txt delete mode 100644 main/CLEANUP_COMPLETE_VERIFICATION.md delete mode 100644 main/CLEAN_REBUILD_COMPLETE.md delete mode 100644 main/CLEAN_WORKSPACE_SETUP.md delete mode 100644 main/COMPLETE_CLEANUP_CHECKLIST.md delete mode 100644 main/COMPLETE_DELIVERABLES.md delete mode 100644 main/COMPLETE_FILE_REFERENCE.md delete mode 100644 main/COMPLETE_SETUP_SUMMARY.md delete mode 100644 main/COMPLETION_CERTIFICATE_RUNTIME_PATCH.md delete mode 100644 main/COMPLETION_SUMMARY.md delete mode 100644 main/DEEP_INSPECTION_REPORT.md delete mode 100644 main/DELIVERABLES.md delete mode 100644 main/DELIVERABLES_RUNTIME_VALIDATION.md delete mode 100644 main/DEPLOY.bat delete mode 100644 main/DEPLOYMENT.md delete mode 100644 main/DEPLOYMENT_BLOCKED_BILLING.md delete mode 100644 main/DEPLOYMENT_COMPLETE.md delete mode 100644 main/DEPLOYMENT_COMPLETE_AI_UNLOCKED.txt delete mode 100644 main/DEPLOYMENT_COMPLETE_BYPASS.md delete mode 100644 main/DEPLOYMENT_COMPLETE_RUNTIME_VALIDATION.md delete mode 100644 main/DEPLOYMENT_COMPLETE_SUCCESS.md delete mode 100644 main/DEPLOYMENT_CONFIGURATION_COMPLETE.md delete mode 100644 main/DEPLOYMENT_FINAL.md delete mode 100644 main/DEPLOYMENT_FINAL_STATUS.md delete mode 100644 main/DEPLOYMENT_FINAL_SWEEP.md delete mode 100644 main/DEPLOYMENT_LIVE_SUCCESS.md delete mode 100644 main/DEPLOYMENT_MISMATCH_REPORT.md delete mode 100644 main/DEPLOYMENT_PHASE_COMPLETE.md delete mode 100644 main/DEPLOYMENT_QUICKSTART.md delete mode 100644 main/DEPLOYMENT_READY.md delete mode 100644 main/DEPLOYMENT_READY_GO_LIVE.md delete mode 100644 main/DEPLOYMENT_READY_STATUS.md delete mode 100644 main/DEPLOYMENT_RUNTIME_VALIDATION_PATCH.md delete mode 100644 main/DEPLOYMENT_STATUS.md delete mode 100644 main/DEPLOYMENT_STATUS_FINAL.md delete mode 100644 main/DEPLOYMENT_STATUS_GUITAR_PRICING.txt delete mode 100644 main/DEPLOYMENT_SYNC_EXPLAINED.md delete mode 100644 main/DEPLOYMENT_TRADEHAX_NET.md delete mode 100644 main/DEPLOY_QUICK_REF.md delete mode 100644 main/DISABLE_PASSWORD_PROTECTION.md delete mode 100644 main/DOCKER_COMPOSE_STATUS.md delete mode 100644 main/DOCUMENTATION_INDEX.md delete mode 100644 main/DOCUMENTATION_INDEX_RUNTIME_PATCH.md delete mode 100644 main/DOMAIN_PIPELINE_ARCHITECTURE.md delete mode 100644 main/Dockerfile delete mode 100644 main/Documentation/API_CONNECTION_GUIDE.md delete mode 100644 main/Documentation/COMPETITOR_DEEP_DIVE_2026.md delete mode 100644 main/Documentation/DEPLOY_ONE_COMMAND.md delete mode 100644 main/Documentation/HF_TOKEN_SETUP.md delete mode 100644 main/Documentation/LEGAL_GUARDRAILS.md delete mode 100644 main/Documentation/MOBILE_WEB_OPTIMIZATION.md delete mode 100644 main/Documentation/OSS_COMPONENT_SHORTLIST.md delete mode 100644 main/Documentation/SOCIAL_MEDIA_INTEGRATION.md delete mode 100644 main/Documentation/SOCIAL_MEDIA_MCP_GUIDE.md delete mode 100644 main/Documentation/SOCIAL_MEDIA_MCP_IMPLEMENTATION.md delete mode 100644 main/Documentation/SOCIAL_MEDIA_QUICK_REFERENCE.md delete mode 100644 main/ENDPOINT_FIX_INSTRUCTIONS.md delete mode 100644 main/ENTERPRISE_DEVELOPMENT_STRATEGY.md delete mode 100644 main/EXECUTION_CHECKLIST_ALL_TARGETS.md delete mode 100644 main/EXECUTION_READY.md delete 
mode 100644 main/EXECUTIVE_SUMMARY_PHASE_1.md delete mode 100644 main/EXECUTIVE_SUMMARY_RUNTIME_PATCH.md delete mode 100644 main/FILES_CREATED_SUMMARY.md delete mode 100644 main/FINAL_CLEANUP_REPORT_2026.md delete mode 100644 main/FINAL_DELIVERABLES_LIST.md delete mode 100644 main/FINAL_DEPLOYMENT_CHECKLIST.md delete mode 100644 main/FINAL_DEPLOYMENT_STEPS.md delete mode 100644 main/FINAL_STATUS_GUITAR_PRICING_DEPLOYED.txt delete mode 100644 main/GITHUB_REPOSITORY_GUIDE.md delete mode 100644 main/GITLENS_SETUP_GUIDE.md delete mode 100644 main/GIT_COMMIT_PAPER_TRADING.md delete mode 100644 main/GPT_Trading_Assistant_API_Spec.md delete mode 100644 main/GPT_Trading_Assistant_Deployment_Checklist.md delete mode 100644 main/GPT_Trading_Assistant_Integration_Architecture.md delete mode 100644 main/GPT_Trading_Assistant_Production_Validation.md delete mode 100644 main/GPT_Trading_Assistant_Signal_Alert_Logic.md delete mode 100644 main/GPT_Trading_Assistant_UIUX_Wireframe.md delete mode 100644 main/GROK_4_SETUP_COMPLETE.md delete mode 100644 main/GROWTH_CAPABILITIES_ROADMAP.md delete mode 100644 main/GUITAR_PRICING_CONFIG_CREATED.md delete mode 100644 main/GUITAR_PRICING_UPDATE.md delete mode 100644 main/GUITAR_PRICING_UPDATE_NEEDED.md delete mode 100644 main/HF_TOKEN_SETUP_COMPLETE.md delete mode 100644 main/HIGH_VALUE_TARGETS_IMPLEMENTATION.md delete mode 100644 main/IDE_DEVELOPMENT_SETUP.md delete mode 100644 main/IMPLEMENTATION_COMPLETE.md delete mode 100644 main/IMPLEMENTATION_COMPLETE_RUNTIME_VALIDATION.md delete mode 100644 main/IMPLEMENTATION_QUICK_REF.md delete mode 100644 main/IMPLEMENTATION_STATUS_TARGETS_1_2_3.md delete mode 100644 main/IMPROVEMENTS_SUMMARY.md delete mode 100644 main/INSPECTION_SUMMARY.md delete mode 100644 main/INTELLIJ_IDEA_SETUP.md delete mode 100644 main/JAVA_21_COMPLETION_RECORD.md delete mode 100644 main/JAVA_21_FINAL_SUMMARY.txt delete mode 100644 main/JAVA_21_LTS_UPGRADE.md delete mode 100644 main/JAVA_21_QUICK_START.md delete mode 100644 main/JAVA_21_UPGRADE_EXECUTION_SUMMARY.md delete mode 100644 main/JAVA_21_UPGRADE_INDEX.md delete mode 100644 main/JAVA_DEPLOYMENT_COMPLETE.md delete mode 100644 main/JAVA_UPGRADE_STATUS.txt delete mode 100644 main/LIVEPASS_CHECKLIST.md delete mode 100644 main/LIVEPASS_DEPLOYMENT_REPORT.md delete mode 100644 main/LIVEPASS_FINAL_REPORT.txt delete mode 100644 main/LIVEPASS_REPORT.txt delete mode 100644 main/MASTERS_SUBMISSION_PACKAGE.md delete mode 100644 main/MCP_UNIFIED_ENVIRONMENT.md delete mode 100644 main/MISSING_SECRETS_CHECKLIST.md delete mode 100644 main/MULTI_PROJECT_DEPLOYMENT.md delete mode 100644 main/MULTI_PROJECT_UPDATE.md delete mode 100644 main/Makefile delete mode 100644 main/NEURAL_ENGINE_DEPLOYMENT.md delete mode 100644 main/NEURAL_ENGINE_FINAL_SUMMARY.md delete mode 100644 main/NEURAL_ENGINE_INDEX.md delete mode 100644 main/NEURAL_ENGINE_INTEGRATION_GUIDE.md delete mode 100644 main/NEURAL_ENGINE_MANIFEST.md delete mode 100644 main/NEURAL_ENGINE_README.md delete mode 100644 main/NEURAL_HUB_DEPLOYED.txt delete mode 100644 main/NEURAL_HUB_INTEGRATION_GUIDE.md delete mode 100644 main/OPTIMIZATION_DEPLOYMENT_COMPLETE.md delete mode 100644 main/PAPER_TRADING_COMPLETE.md delete mode 100644 main/PAPER_TRADING_MODE.md delete mode 100644 main/PAPER_TRADING_QUICKSTART.md delete mode 100644 main/PAPER_TRADING_SUMMARY.md delete mode 100644 main/PHASE_1_ARCHITECTURE.md delete mode 100644 main/PHASE_1_COMPLETE_SUMMARY.md delete mode 100644 main/PHASE_1_COMPLETION_SUMMARY.md delete mode 100644 
main/PHASE_1_DELIVERABLES_MANIFEST.md delete mode 100644 main/PHASE_1_DEPLOYMENT_MANIFEST.md delete mode 100644 main/PHASE_1_INDEX.md delete mode 100644 main/PHASE_1_QUICK_START.md delete mode 100644 main/PHASE_1_README.md delete mode 100644 main/PHASE_1_STATUS_REPORT.md delete mode 100644 main/POLYCLAW_INTEGRATION_SUMMARY.md delete mode 100644 main/POLYMARKET_AI_SETUP.md delete mode 100644 main/POLYMARKET_DEPLOYMENT_CHECKLIST.md delete mode 100644 main/POLYMARKET_QUICK_REFERENCE.md delete mode 100644 main/POLYMARKET_TRADING_ASSISTANT_GUIDE.md delete mode 100644 main/POLYMARKET_TRADING_ASSISTANT_IMPLEMENTATION.md delete mode 100644 main/PRE_DEPLOYMENT_VERIFICATION_FINAL.md delete mode 100644 main/PRODUCTION_COMPLETE.md delete mode 100644 main/PRODUCTION_ENV_SETUP.md delete mode 100644 main/PRODUCTION_FINAL_SUCCESS.txt delete mode 100644 main/PRODUCTION_INDEX.md delete mode 100644 main/PRODUCTION_READY.md delete mode 100644 main/PRODUCTION_STATUS_FINAL.md delete mode 100644 main/QUICK_ACTION_REQUIRED.txt delete mode 100644 main/QUICK_AUTH_REFERENCE.txt delete mode 100644 main/QUICK_FIX.txt delete mode 100644 main/QUICK_REFERENCE.md delete mode 100644 main/README.md delete mode 100644 main/README_INDEX.md delete mode 100644 main/README_POLYMARKET_INDEX.md delete mode 100644 main/READY_TO_DEPLOY.md delete mode 100644 main/READY_TO_DEPLOY.txt delete mode 100644 main/REBUILD_STATUS.md delete mode 100644 main/REMOVE_AUTHENTICATION_BARRIERS.md delete mode 100644 main/REPOSITORY_CONSOLIDATION_PLAN.md delete mode 100644 main/REPOSITORY_SYNC_REPORT.md delete mode 100644 main/RUNTIME_ERROR_FIX.md delete mode 100644 main/RUNTIME_FIX_STATUS.md delete mode 100644 main/RUNTIME_VALIDATION_PATCH.md delete mode 100644 main/SECURE_DEPLOYMENT_GUIDE.md delete mode 100644 main/SECURITY_AUDIT.md delete mode 100644 main/SECURITY_DEPLOYMENT_REPORT.md delete mode 100644 main/START_HERE_SUPABASE.md delete mode 100644 main/STREAMLINED_ARCHITECTURE_2026.md delete mode 100644 main/SUCCESS_SUMMARY.txt delete mode 100644 main/SUPABASE_CONNECTION_STATUS.md delete mode 100644 main/SUPABASE_DEPLOYMENT_CHECKLIST.md delete mode 100644 main/SUPABASE_DOCUMENTATION_INDEX.md delete mode 100644 main/SUPABASE_INITIALIZATION_GUIDE.md delete mode 100644 main/SUPABASE_QUICK_START.md delete mode 100644 main/SUPABASE_STATUS_SUMMARY.md delete mode 100644 main/SUPABASE_TROUBLESHOOTING.md delete mode 100644 main/TRADEHAXAI_DOMAINS_FIXED.md delete mode 100644 main/TRADEHAX_MERGE_COMPLETE.md delete mode 100644 main/TRADEHAX_SYSTEM_OVERVIEW.md delete mode 100644 main/TRADING_GATE_IMPLEMENTATION_COMPLETE.md delete mode 100644 main/TWO_PROJECT_QUICK_START.md delete mode 100644 main/VERCEL_REDIRECT_FIX.md delete mode 100644 main/VITE_REACT_PLUGIN_FIX.md delete mode 100644 main/WHATS_NEW.md delete mode 100644 main/XAI_GROK_SUPABASE_COMPLETION.txt delete mode 100644 main/api/account.ts delete mode 100644 main/api/neural-hub.ts delete mode 100644 main/automate-bypass.ps1 delete mode 100644 main/automate-bypass.sh delete mode 100644 main/automation/ENDPOINT_SYNC_EXTENSION.md delete mode 100644 main/automation/README.md delete mode 100644 main/automation/sync-all.ps1 delete mode 100644 main/automation/sync-all.sh delete mode 100644 main/automation/sync-config.json delete mode 100644 main/clear-oauth.bat delete mode 100644 main/deploy.ps1 delete mode 100644 main/deploy.sh delete mode 100644 main/disable-vercel-protection.ps1 delete mode 100644 main/docker-compose.prod.yml delete mode 100644 main/docker-compose.social.yml delete mode 100644 
main/docker-compose.staging.yml delete mode 100644 main/docker-compose.yml delete mode 100644 main/gitstatus.out delete mode 100644 main/k8s-app.yaml delete mode 100644 main/k8s-configmap.yaml delete mode 100644 main/k8s-namespace.yaml delete mode 100644 main/k8s-postgres.yaml delete mode 100644 main/k8s-redis.yaml delete mode 100644 main/k8s-secret.yaml delete mode 100644 main/lib/trading/neural-hub-pipeline.ts delete mode 100644 main/lib/trading/technical-indicators.ts delete mode 100644 main/main-mirror.bfg-report/2026-03-19/21-38-08/cache-stats.txt delete mode 100644 main/main-mirror.bfg-report/2026-03-19/21-38-08/deleted-files.txt delete mode 100644 main/main-mirror.bfg-report/2026-03-19/21-38-08/object-id-map.old-new.txt delete mode 100644 main/main-mirror.bfg-report/2026-03-19/21-38-08/protected-dirt/466ea925-HEAD.csv delete mode 100644 main/main-mirror/HEAD delete mode 100644 main/main-mirror/config delete mode 100644 main/main-mirror/description delete mode 100644 main/main-mirror/hooks/applypatch-msg.sample delete mode 100644 main/main-mirror/hooks/commit-msg.sample delete mode 100644 main/main-mirror/hooks/fsmonitor-watchman.sample delete mode 100644 main/main-mirror/hooks/post-update.sample delete mode 100644 main/main-mirror/hooks/pre-applypatch.sample delete mode 100644 main/main-mirror/hooks/pre-commit.sample delete mode 100644 main/main-mirror/hooks/pre-merge-commit.sample delete mode 100644 main/main-mirror/hooks/pre-push.sample delete mode 100644 main/main-mirror/hooks/pre-rebase.sample delete mode 100644 main/main-mirror/hooks/pre-receive.sample delete mode 100644 main/main-mirror/hooks/prepare-commit-msg.sample delete mode 100644 main/main-mirror/hooks/push-to-checkout.sample delete mode 100644 main/main-mirror/hooks/sendemail-validate.sample delete mode 100644 main/main-mirror/hooks/update.sample delete mode 100644 main/main-mirror/info/exclude delete mode 100644 main/main-mirror/info/refs delete mode 100644 main/main-mirror/objects/00/3eecba8ff3cb69cec471960a10a407a1271439 delete mode 100644 main/main-mirror/objects/06/5b6fa7208f2c77f6fe35f70a5e6795f2c09a41 delete mode 100644 main/main-mirror/objects/06/808593691c4e6f425f26a3d2cc35f991e0408a delete mode 100644 main/main-mirror/objects/09/62e7adc7d67fb3b72b907bc1e27e908ad8b009 delete mode 100644 main/main-mirror/objects/0b/38d6ccfa259181678733b97e5e885600b6e25c delete mode 100644 main/main-mirror/objects/0c/f70da50da63b394bb68a6bfe350e815d7ace7e delete mode 100644 main/main-mirror/objects/12/03415b1c8bbd1aa11f51046e5b2691253b6033 delete mode 100644 main/main-mirror/objects/12/4be7b93a635139aa7597f4fb65be3bb4a58366 delete mode 100644 main/main-mirror/objects/19/222792d099e51926233b92a84267cdb867cadb delete mode 100644 main/main-mirror/objects/19/94a60e797f0219eafb1d4e16b0075df6494a6c delete mode 100644 main/main-mirror/objects/19/a9cade88b929bab94a8ade3e06af00c486ed1b delete mode 100644 main/main-mirror/objects/1b/ca9c58e1d92d7092a35cd40966e23a73224b8f delete mode 100644 main/main-mirror/objects/21/8b166608b167a399b9d6311727062fd0d1344d delete mode 100644 main/main-mirror/objects/23/e8d5920cd3a487c51f8c8652fc24b96a1c0d3b delete mode 100644 main/main-mirror/objects/27/096b987130fb4d3bff5184362171f8c1d24d5d delete mode 100644 main/main-mirror/objects/27/3e2cee3a6aa512dcb247001bbd6d3bc69343dd delete mode 100644 main/main-mirror/objects/28/b4f23c6746499faf8d7e66a56bcee46764950a delete mode 100644 main/main-mirror/objects/28/bb6c1bcc4928db6efc9d1026a9d3d0b24883d3 delete mode 100644 
main/main-mirror/objects/29/bab6382b23aca9e06ec4f8325cc425551bf35b delete mode 100644 main/main-mirror/objects/2c/06f0c79b62cd712b19a9b9bdca61bdc947b56e delete mode 100644 main/main-mirror/objects/2c/3f6a543c6f82ec50b815f34e5d292ec1a783bb delete mode 100644 main/main-mirror/objects/2e/8863610da03beb038ce8581ed1f9651d871b55 delete mode 100644 main/main-mirror/objects/2e/940f5859cb3adf79524a10218088046e7e8d4a delete mode 100644 main/main-mirror/objects/2f/90f7cb20a4fe018900497119dc86aead028fe5 delete mode 100644 main/main-mirror/objects/36/b915345c2653533d70a22763dd22a530d74209 delete mode 100644 main/main-mirror/objects/38/5b2c962d58b8788bdc1698b98be23410fbec16 delete mode 100644 main/main-mirror/objects/39/9b9dbb75db9f32c6381eeace15c027fb6b8758 delete mode 100644 main/main-mirror/objects/3a/2e7677566e90a9771a0e08c3ad1e32ac16009c delete mode 100644 main/main-mirror/objects/3a/8741a733ad1b69cb2b77bcdb8edc1afc9f5285 delete mode 100644 main/main-mirror/objects/3b/a5510bd1494f65a4fc8e4ef82ed9f9841885fd delete mode 100644 main/main-mirror/objects/3e/7ea060464862001c2808ee601a7c6c8f903e58 delete mode 100644 main/main-mirror/objects/3e/d6cdfa19b07f1eaf179c236b3ffe84950494c6 delete mode 100644 main/main-mirror/objects/42/2e1b180b9809d8d9baa5306f94b0a93aa51933 delete mode 100644 main/main-mirror/objects/42/ccf4926295f112fc18960da28ea3f109b70fba delete mode 100644 main/main-mirror/objects/43/3e2e332ae566a1e7a0baa6e1eec59786038a21 delete mode 100644 main/main-mirror/objects/44/a2257afab779ac41596c1768da4c88ccb5af0c delete mode 100644 main/main-mirror/objects/45/46d9f642c0f75c8d1f3ed4327bc101a349c0ef delete mode 100644 main/main-mirror/objects/45/daf039fd6440f2969f6b5dc1addbdafb82782e delete mode 100644 main/main-mirror/objects/48/170dffff1d58d92283118f177d471de81d74cc delete mode 100644 main/main-mirror/objects/49/6fed147f4e19d615cdd5ae456393914c7e3e71 delete mode 100644 main/main-mirror/objects/4a/2a9798896bc55f4d16361c1a8cc8e7660345b0 delete mode 100644 main/main-mirror/objects/4b/7b5e8b5cd925b529e33b3ddcf70ae957f3a56e delete mode 100644 main/main-mirror/objects/4c/01af4e73de4f97d5793f8dd0e428d73b071e5c delete mode 100644 main/main-mirror/objects/4e/bfb772e61c7ac99cc21470248851dd55bfd33c delete mode 100644 main/main-mirror/objects/4e/ee1afc4dd779f51b2ccacef0fd716a20137ef4 delete mode 100644 main/main-mirror/objects/51/58ef8528a012765cc812cce933a30bd9d17e66 delete mode 100644 main/main-mirror/objects/51/9434a699d13cd1d455bfaed056d254e5c93128 delete mode 100644 main/main-mirror/objects/54/12f18a192aca7ff86ed3a10bdb05f683d23439 delete mode 100644 main/main-mirror/objects/54/cc041820b8160747ba1955d65b8575f6ba0242 delete mode 100644 main/main-mirror/objects/60/a60701bdc664a912abeea00c7420564ac1bfa4 delete mode 100644 main/main-mirror/objects/63/1965896effa264513d93e433c7dbd32c96cb3c delete mode 100644 main/main-mirror/objects/63/7c707e57a7536cd743ff845c6411cdd9775fc3 delete mode 100644 main/main-mirror/objects/65/7830fcc116ebf7c791f0992ed36c76d151a2c4 delete mode 100644 main/main-mirror/objects/67/d7962b39755f8530ac20612d8d605d74416d60 delete mode 100644 main/main-mirror/objects/6c/c16a42bfb6b6aac5d2b12d9e2d373f78fecedf delete mode 100644 main/main-mirror/objects/6c/d06796134219596ebf80de6fac6e085aca0599 delete mode 100644 main/main-mirror/objects/6d/c886d0d29116cabfdeedab7ef52a1c6a76a88b delete mode 100644 main/main-mirror/objects/6d/f9a6b57a70fc22ad75091207865679f39d2c23 delete mode 100644 main/main-mirror/objects/6e/92517c3e37899b772f127525744dd51de74ad5 delete mode 100644 
main/main-mirror/objects/6f/0a452e45f13914696bc9ecb3efdd0f1c846303 delete mode 100644 main/main-mirror/objects/6f/3384dfb18e81383e04053beac22e50113db83c delete mode 100644 main/main-mirror/objects/70/af36f87703221d24d5cbd007c162344a812817 delete mode 100644 main/main-mirror/objects/70/eebc9e37e5ac4f7bb7266fecb5837afc217f2b delete mode 100644 main/main-mirror/objects/74/130995f1cd9003495bf089340119f0433363f0 delete mode 100644 main/main-mirror/objects/77/9b0a8f939e221604aeda5662575909710755e1 delete mode 100644 main/main-mirror/objects/77/b18e309b7d739eb3fd0362e6f57503bc6ef6da delete mode 100644 main/main-mirror/objects/7a/ff9b237757f5e37e707f2146d7084b2d10e872 delete mode 100644 main/main-mirror/objects/7c/25397ed2ce79d357f944a3279379f5e370b0d1 delete mode 100644 main/main-mirror/objects/7c/28fcf90cfa4aaea467989c3e0288eea9376fa0 delete mode 100644 main/main-mirror/objects/7f/e0ca6e0bae63b68ba4b7eb16046b1d1027526b delete mode 100644 main/main-mirror/objects/80/f7f5746535147ec95d230e594938c263d1a8ec delete mode 100644 main/main-mirror/objects/82/cc7c74282761a4c4d12055418c847166340fa8 delete mode 100644 main/main-mirror/objects/85/b7c9507cdef9680e3cbe5c7d7362effbb1fb8b delete mode 100644 main/main-mirror/objects/86/39ca18629b33cea057c325c25dcfb38ddf8d0b delete mode 100644 main/main-mirror/objects/8a/44c269bc6b183f0aa2bccfcdedbb6b98be02bd delete mode 100644 main/main-mirror/objects/8a/ecc18ca80a0c65daf19584c07ae230316a72c4 delete mode 100644 main/main-mirror/objects/8d/5ce7bb872a545c7887551b392762eb12330f9e delete mode 100644 main/main-mirror/objects/92/c91329412402c8f2d9e7b08d075c68f34d4614 delete mode 100644 main/main-mirror/objects/95/00320bbd629425b4c0867cac3e1a937d761606 delete mode 100644 main/main-mirror/objects/97/340fe497fe4ae28e292455b30632002f8d6e15 delete mode 100644 main/main-mirror/objects/98/8f0a4219ad5735b93ef3cdee20c0cb27ce7133 delete mode 100644 main/main-mirror/objects/99/86ac0b011ec560596928d735a9582192f10a41 delete mode 100644 main/main-mirror/objects/99/af85d49323d6409911a8935b434be4f060888d delete mode 100644 main/main-mirror/objects/99/ef15fa0abf5ceeb9fa9d1a3419b554f67034d3 delete mode 100644 main/main-mirror/objects/9a/03be0853620564b5758af9a4589b0a74c26420 delete mode 100644 main/main-mirror/objects/9b/29e11770c8bb24d26b12b143ae7764d17ae935 delete mode 100644 main/main-mirror/objects/9e/4f506d6380f1e2429af74a0a941464e1448093 delete mode 100644 main/main-mirror/objects/a0/be556c4288cd6f67280031a2db89b42a749fc4 delete mode 100644 main/main-mirror/objects/a3/157042b291a3c3f4c7722cb8755f01530af649 delete mode 100644 main/main-mirror/objects/a3/8e203859492b0709bc5cf67e1619cc4f82847c delete mode 100644 main/main-mirror/objects/a4/fbee9f24d85e1da2b3e55de7bef22f766fd7ee delete mode 100644 main/main-mirror/objects/a5/b50d6c1eb9ebbee7dccea0464c3b7246a62f95 delete mode 100644 main/main-mirror/objects/a7/0db4601cd1ec93ea8247ce56d66ec21fd4e714 delete mode 100644 main/main-mirror/objects/a7/1dcdbc520e2c9079ad2bb720e07f77b5e94901 delete mode 100644 main/main-mirror/objects/a7/69f992e33271546971b923e93edea5686292a2 delete mode 100644 main/main-mirror/objects/a9/b1bf9e099efdfb0dc4315e402b623b090ef24c delete mode 100644 main/main-mirror/objects/ab/266486ddacfcf9696a2e9d12c237bc0a207fa5 delete mode 100644 main/main-mirror/objects/ad/04a318c83e50de24a8e0d23c5a00c965a46aaa delete mode 100644 main/main-mirror/objects/ae/8254018caefc48b99f096d6d5519e8f4532c7e delete mode 100644 main/main-mirror/objects/ae/ed71f2a2d6965f5bad744a9494cf6c5a755a31 delete mode 100644 
main/main-mirror/objects/b0/e0409e479cc13360fcc161c7a7216e874d1d00 delete mode 100644 main/main-mirror/objects/b1/e363ffe0ea7dde37cfa08faef4adb7854a13ed delete mode 100644 main/main-mirror/objects/b3/2c6178326823982caf921ab99943cca38f02a2 delete mode 100644 main/main-mirror/objects/b3/fc8539054f2a35c50b907bd38cde2d8d41c1c0 delete mode 100644 main/main-mirror/objects/b5/2b0f767431cfcd161dbd0e0acaa3f2c2f0c472 delete mode 100644 main/main-mirror/objects/b5/5db5fcf98d023acfcddaf2c3f86adadcb9bf50 delete mode 100644 main/main-mirror/objects/b6/75c02205643d1afb7e46bf97b96905fa13034e delete mode 100644 main/main-mirror/objects/b7/0539ad0335fc4a36e339bb2ce06e7249dfabc8 delete mode 100644 main/main-mirror/objects/bb/ee7ec051ae591f967867750be61c7fa30fb2be delete mode 100644 main/main-mirror/objects/bc/b94978e069cd16103462d1e297e90e00be4f5c delete mode 100644 main/main-mirror/objects/bc/cf790361bc9507009678cc42a69f79687394bb delete mode 100644 main/main-mirror/objects/be/2708f4bd765f5542ba57799507a5e4867df6e3 delete mode 100644 main/main-mirror/objects/bf/fc491da862bb98cc42b5f709933a9095186089 delete mode 100644 main/main-mirror/objects/c0/f72b2942a3f9bd3f642951bfac0ba433451a3e delete mode 100644 main/main-mirror/objects/c2/293d1069bbf0d97063785c656dfac5b6f2d827 delete mode 100644 main/main-mirror/objects/c2/76155bcfde4958f5ce902cbb92aa548a33fba4 delete mode 100644 main/main-mirror/objects/c4/c659bac10b109e9f1067744d86c1e70013167f delete mode 100644 main/main-mirror/objects/c6/cfb72e9ff06729f3f8df7dc6493fd37a523d15 delete mode 100644 main/main-mirror/objects/c9/1ba877ce1ce741c005953ac8ed521e440d5858 delete mode 100644 main/main-mirror/objects/c9/d518bdaacae31432ccc9cae4022e0e869b8951 delete mode 100644 main/main-mirror/objects/ca/616ee242f33aee743b7a8c69a78c1f32dfb2cb delete mode 100644 main/main-mirror/objects/ca/fafe74d169561f79030a95fd1d4bcf4f68a23d delete mode 100644 main/main-mirror/objects/cc/7834430c00243f6036de34c8e0e0fc1af882cd delete mode 100644 main/main-mirror/objects/d0/da88de2951629ed5e2c4c2658e8ecdfbfa2a39 delete mode 100644 main/main-mirror/objects/d3/86bbc22f3214ec99edf80c7c9034f934bc89bc delete mode 100644 main/main-mirror/objects/d8/7659dc0ff4e4c7b921e32491e4aa5024f34323 delete mode 100644 main/main-mirror/objects/d9/4286cd163d8be8d454e5dc3ad64e3f607a2b2e delete mode 100644 main/main-mirror/objects/df/f9ad7b5416f26639d84dcab693dad958142998 delete mode 100644 main/main-mirror/objects/df/fc07ab175f88cedcadacb17634ff79ba033e67 delete mode 100644 main/main-mirror/objects/e4/e63b8660ddb43f67d7df6445e8445afa9d4bef delete mode 100644 main/main-mirror/objects/e6/283afde6bbc68e915e4f811c890ef5f55871c2 delete mode 100644 main/main-mirror/objects/e6/5f2b4417d4b09c69b4561f737d6a0a29d2be7f delete mode 100644 main/main-mirror/objects/e6/b4d8d2ce4ea22f041921acc8983190fa707dc7 delete mode 100644 main/main-mirror/objects/ea/6e63924817991b082b10790f0b5dff2ba73c15 delete mode 100644 main/main-mirror/objects/eb/6800e85b8bc6ab026772529c6eef099206408e delete mode 100644 main/main-mirror/objects/ec/cf0e699c14ba04aaab09a95281ac108feef59b delete mode 100644 main/main-mirror/objects/ed/a89e31c99389393d1385be79e1cf1b5fa9e597 delete mode 100644 main/main-mirror/objects/ef/c256f6bdf83b4fa29efb555f9f8d75d0179468 delete mode 100644 main/main-mirror/objects/f4/75ce01c41e6d93c6a9fd31e18b8b8e01869207 delete mode 100644 main/main-mirror/objects/f5/0314085d75e02979e2ee804d5e8579f896025a delete mode 100644 main/main-mirror/objects/f5/f244f3785aa683bccf28cc00be28a86890af71 delete mode 100644 
main/main-mirror/objects/f7/13fc08933ff89d4a12370187085128f9aebcac delete mode 100644 main/main-mirror/objects/fb/bce8db47a1571734321dc7eddd644326c37ef0 delete mode 100644 main/main-mirror/objects/fd/41b194d469c2fae5324b91a26a6303e9ce7d95 delete mode 100644 main/main-mirror/objects/info/commit-graph delete mode 100644 main/main-mirror/objects/info/packs delete mode 100644 main/main-mirror/objects/pack/pack-d2ffd67431a565d80139e551925012f85b278dbd.bitmap delete mode 100644 main/main-mirror/objects/pack/pack-d2ffd67431a565d80139e551925012f85b278dbd.idx delete mode 100644 main/main-mirror/objects/pack/pack-d2ffd67431a565d80139e551925012f85b278dbd.pack delete mode 100644 main/main-mirror/objects/pack/pack-d2ffd67431a565d80139e551925012f85b278dbd.rev delete mode 100644 main/main-mirror/packed-refs delete mode 100644 main/models.yml delete mode 100644 main/models/neuralHubConfig.ts delete mode 100644 main/models/tradingModels.ts delete mode 100644 main/nginx.conf delete mode 100644 main/open-tradehax.ps1 delete mode 100644 main/scripts/api-connection-manager.js delete mode 100644 main/scripts/auto-push-commit-deploy.sh delete mode 100644 main/scripts/autonomous-push-orchestrator.js delete mode 100644 main/scripts/check-dns.sh delete mode 100644 main/scripts/clear-oauth-permanent.js delete mode 100644 main/scripts/deploy-tradehax.ps1 delete mode 100644 main/scripts/deploy-tradehax.sh delete mode 100644 main/scripts/deploy.bat delete mode 100644 main/scripts/deploy.sh delete mode 100644 main/scripts/endpoint-health-check.js delete mode 100644 main/scripts/env-autofill.js delete mode 100644 main/scripts/full-deploy-check.sh delete mode 100644 main/scripts/full-health-check.ps1 delete mode 100644 main/scripts/full-health-check.sh delete mode 100644 main/scripts/livepass-validation.ps1 delete mode 100644 main/scripts/mcp-orchestrator.js delete mode 100644 main/scripts/namecheap-dns-copypasta.bat delete mode 100644 main/scripts/namecheap-dns-copypasta.ps1 delete mode 100644 main/scripts/namecheap-dns-copypasta.sh delete mode 100644 main/scripts/nmap-check.sh delete mode 100644 main/scripts/predeploy-check.js delete mode 100644 main/scripts/setup-gitlens.js delete mode 100644 main/scripts/social-mcp-servers.js delete mode 100644 main/scripts/social-sync-orchestrator.js delete mode 100644 main/scripts/supabase-migrate-ci.sh delete mode 100644 main/scripts/supabase-migrate.ps1 delete mode 100644 main/scripts/supabase-migrate.sh delete mode 100644 main/scripts/sync-env-to-vercel.js delete mode 100644 main/scripts/trading-gate-integration-check.mjs delete mode 100644 main/scripts/trading-gate-smoke-test.mjs delete mode 100644 main/scripts/unified-mcp-push.js delete mode 100644 main/scripts/validate-runtime-patch.mjs delete mode 100644 main/scripts/verify-hf-token.js delete mode 100644 main/scripts/webhook-handler.js delete mode 100644 main/services/dataService.ts delete mode 100644 main/services/llmService.ts delete mode 100644 main/services/neuralBotService.ts delete mode 100644 main/setup-neural-engine.ps1 delete mode 100644 main/setup-neural-engine.sh delete mode 100644 main/supabase_schema.sql delete mode 100644 main/sync-env-to-vercel.ps1 delete mode 100644 main/test-openai-api.ps1 delete mode 100644 main/test-openai-api.sh delete mode 100644 main/tmp_alias_ascii.txt delete mode 100644 main/tmp_alias_backup_before_fix.txt delete mode 100644 main/tmp_alias_by_org.txt delete mode 100644 main/tmp_alias_check_after_set.txt delete mode 100644 main/tmp_alias_current.txt delete mode 100644 
main/tmp_alias_default_scope.txt delete mode 100644 main/tmp_alias_hackai_after_deploy.txt delete mode 100644 main/tmp_alias_hackavelliz.txt delete mode 100644 main/tmp_alias_live_check.txt delete mode 100644 main/tmp_alias_ls_hackavelliz_latest.txt delete mode 100644 main/tmp_alias_owner7282.txt delete mode 100644 main/tmp_alias_set_tradehax.txt delete mode 100644 main/tmp_alias_set_tradehax_fix.txt delete mode 100644 main/tmp_alias_set_tradehax_now.txt delete mode 100644 main/tmp_alias_set_www.txt delete mode 100644 main/tmp_alias_set_www_now.txt delete mode 100644 main/tmp_alias_set_www_tradehax_fix.txt delete mode 100644 main/tmp_alias_tradehax_fresh.txt delete mode 100644 main/tmp_alias_tradehax_net.txt delete mode 100644 main/tmp_alias_tradehax_recovery.txt delete mode 100644 main/tmp_alias_tradehaxai_me_fresh.txt delete mode 100644 main/tmp_alias_tradehaxai_me_recovery.txt delete mode 100644 main/tmp_alias_tradehaxai_tech_fresh.txt delete mode 100644 main/tmp_alias_tradehaxai_tech_recovery.txt delete mode 100644 main/tmp_alias_www_tradehax_fresh.txt delete mode 100644 main/tmp_alias_www_tradehax_net.txt delete mode 100644 main/tmp_alias_www_tradehax_recovery.txt delete mode 100644 main/tmp_alias_www_tradehaxai_me_fresh.txt delete mode 100644 main/tmp_alias_www_tradehaxai_me_recovery.txt delete mode 100644 main/tmp_alias_www_tradehaxai_tech_fresh.txt delete mode 100644 main/tmp_alias_www_tradehaxai_tech_recovery.txt delete mode 100644 main/tmp_apex_body_ascii.html delete mode 100644 main/tmp_apex_final_check.txt delete mode 100644 main/tmp_apex_headers_ascii.txt delete mode 100644 main/tmp_branch_after_push.txt delete mode 100644 main/tmp_branch_latest2.txt delete mode 100644 main/tmp_branch_ship.txt delete mode 100644 main/tmp_build_after_fix.txt delete mode 100644 main/tmp_build_after_rewrites.txt delete mode 100644 main/tmp_build_check.txt delete mode 100644 main/tmp_build_exit.txt delete mode 100644 main/tmp_build_exit2.txt delete mode 100644 main/tmp_build_final2.txt delete mode 100644 main/tmp_build_now.txt delete mode 100644 main/tmp_build_now_status.txt delete mode 100644 main/tmp_build_output.txt delete mode 100644 main/tmp_build_release.txt delete mode 100644 main/tmp_build_release_status.txt delete mode 100644 main/tmp_build_status.txt delete mode 100644 main/tmp_commit_ship_result.txt delete mode 100644 main/tmp_curl_test.txt delete mode 100644 main/tmp_deploy_after_rewrites.txt delete mode 100644 main/tmp_deploy_compare.txt delete mode 100644 main/tmp_deploy_compare_bypass.txt delete mode 100644 main/tmp_deploy_fix.txt delete mode 100644 main/tmp_deploy_latest.txt delete mode 100644 main/tmp_deploy_ship_result.txt delete mode 100644 main/tmp_deploy_ship_result2.txt delete mode 100644 main/tmp_deploy_status.txt delete mode 100644 main/tmp_deploy_tradehax_scope_hackai.txt delete mode 100644 main/tmp_deploy_tradehax_scope_hackai_afterfix.txt delete mode 100644 main/tmp_deployment_check.txt delete mode 100644 main/tmp_dist_check.txt delete mode 100644 main/tmp_dns_guide_after_patch.txt delete mode 100644 main/tmp_dns_verify_after_patch.txt delete mode 100644 main/tmp_dns_verify_after_patch2.txt delete mode 100644 main/tmp_domain_inspect_tradehax_net.txt delete mode 100644 main/tmp_domain_inspect_tradehaxai_me.txt delete mode 100644 main/tmp_domain_inspect_tradehaxai_tech.txt delete mode 100644 main/tmp_domain_inspect_www_tradehax_net.txt delete mode 100644 main/tmp_domain_inspect_www_tradehaxai_me.txt delete mode 100644 main/tmp_domain_inspect_www_tradehaxai_tech.txt delete 
mode 100644 main/tmp_endpoint_check.txt delete mode 100644 main/tmp_endpoint_diagnosis.json delete mode 100644 main/tmp_endpoint_headers.txt delete mode 100644 main/tmp_endpoint_matrix.txt delete mode 100644 main/tmp_endpoint_matrix_after_fix.txt delete mode 100644 main/tmp_endpoint_test_308.txt delete mode 100644 main/tmp_endpoint_titles.txt delete mode 100644 main/tmp_final_page_check.html delete mode 100644 main/tmp_fix_build.txt delete mode 100644 main/tmp_fix_status.txt delete mode 100644 main/tmp_fresh_deploy_body.html delete mode 100644 main/tmp_fresh_deploy_capture.txt delete mode 100644 main/tmp_git_commit.txt delete mode 100644 main/tmp_git_commit_redirect_fix.txt delete mode 100644 main/tmp_git_push.txt delete mode 100644 main/tmp_git_status.txt delete mode 100644 main/tmp_git_status_check2.txt delete mode 100644 main/tmp_health_current.txt delete mode 100644 main/tmp_health_file_check.txt delete mode 100644 main/tmp_live_asset_body.txt delete mode 100644 main/tmp_live_asset_headers.txt delete mode 100644 main/tmp_log_after_push.txt delete mode 100644 main/tmp_log_latest2.txt delete mode 100644 main/tmp_ls_default_after_unlink.txt delete mode 100644 main/tmp_ls_default_scope.txt delete mode 100644 main/tmp_ls_hackai_after_deploy.txt delete mode 100644 main/tmp_ls_hackavelliz.txt delete mode 100644 main/tmp_ls_owner7282.txt delete mode 100644 main/tmp_manual_vercel_prod_deploy.txt delete mode 100644 main/tmp_namecheap_commit_a0548da.txt delete mode 100644 main/tmp_namecheap_commit_ed451f2.txt delete mode 100644 main/tmp_namecheap_last_commit.txt delete mode 100644 main/tmp_neural_deploy.txt delete mode 100644 main/tmp_npm_list.txt delete mode 100644 main/tmp_pro_build.txt delete mode 100644 main/tmp_pro_build_status.txt delete mode 100644 main/tmp_pro_deploy.txt delete mode 100644 main/tmp_projects_by_org.txt delete mode 100644 main/tmp_projects_default_after_unlink.txt delete mode 100644 main/tmp_projects_ls.txt delete mode 100644 main/tmp_projects_owner7282.txt delete mode 100644 main/tmp_push_ship_result.txt delete mode 100644 main/tmp_rebuild_output.txt delete mode 100644 main/tmp_recovery_build.txt delete mode 100644 main/tmp_recovery_build_status.txt delete mode 100644 main/tmp_recovery_deploy.txt delete mode 100644 main/tmp_redirect_chain_health.txt delete mode 100644 main/tmp_redirect_chain_tradehax.txt delete mode 100644 main/tmp_release_build.txt delete mode 100644 main/tmp_release_build_status.txt delete mode 100644 main/tmp_release_check.txt delete mode 100644 main/tmp_release_deploy.txt delete mode 100644 main/tmp_remote_ship.txt delete mode 100644 main/tmp_security_build.txt delete mode 100644 main/tmp_security_build_status.txt delete mode 100644 main/tmp_security_deploy.txt delete mode 100644 main/tmp_smoke_test_output.txt delete mode 100644 main/tmp_status_ship.txt delete mode 100644 main/tmp_teams.txt delete mode 100644 main/tmp_teams_full.txt delete mode 100644 main/tmp_tradehax_blank_test.html delete mode 100644 main/tmp_tradehax_http.txt delete mode 100644 main/tmp_tradehax_post_rollback.txt delete mode 100644 main/tmp_tradehaxai_me_test.txt delete mode 100644 main/tmp_tradehaxai_tech_test.txt delete mode 100644 main/tmp_vercel_alias_hackai.txt delete mode 100644 main/tmp_vercel_alias_hackavelliz.txt delete mode 100644 main/tmp_vercel_deploy_final.txt delete mode 100644 main/tmp_vercel_deploy_force.txt delete mode 100644 main/tmp_vercel_deploy_result.txt delete mode 100644 main/tmp_vercel_deploy_result2.txt delete mode 100644 
main/tmp_vercel_deployments_default.txt delete mode 100644 main/tmp_vercel_dir_listing.txt delete mode 100644 main/tmp_vercel_env_list.txt delete mode 100644 main/tmp_vercel_link_now.txt delete mode 100644 main/tmp_vercel_ls_current.txt delete mode 100644 main/tmp_vercel_projects_default.txt delete mode 100644 main/tmp_vercel_projects_hackai_after_unlink.txt delete mode 100644 main/tmp_vercel_teams.txt delete mode 100644 main/tmp_vercel_token_state.txt delete mode 100644 main/tmp_vercel_version_check.txt delete mode 100644 main/tmp_vercel_whoami.txt delete mode 100644 main/tmp_vercel_whoami_after_unlink.txt delete mode 100644 main/tmp_vercel_whoami_now.txt delete mode 100644 main/tmp_vercel_whoami_ship.txt delete mode 100644 main/tmp_verify_build.txt delete mode 100644 main/tmp_verify_build_status.txt delete mode 100644 main/tmp_web_build_output.txt delete mode 100644 main/tmp_web_dir_force.txt delete mode 100644 main/tmp_white_build_check.txt delete mode 100644 main/tmp_whoami.txt delete mode 100644 main/tmp_whoami_after_logout.txt delete mode 100644 main/tmp_www_after_fresh_body.html delete mode 100644 main/tmp_www_after_fresh_headers.txt delete mode 100644 main/tmp_www_endpoint_check.txt delete mode 100644 main/tmp_www_headers_ascii.txt delete mode 100644 main/tmp_www_health_final_check.txt delete mode 100644 main/tmp_www_recovery_asset_headers.txt delete mode 100644 main/tmp_www_recovery_body.html delete mode 100644 main/tmp_www_recovery_headers.txt delete mode 100644 main/tmp_www_root_headers_body.txt delete mode 100644 main/tmp_www_tradehax_post_rollback.txt delete mode 100644 main/tmp_www_tradehax_test.html delete mode 100644 main/tmp_xai_commit.txt delete mode 100644 main/tmp_xai_commit_exit.txt delete mode 160000 main/vendor/massive-client-js delete mode 100644 main/vercel-deploy-webhook.json delete mode 100644 main/verify-deployment.ps1 delete mode 100644 main/verify-tradehax-deployment.sh delete mode 100644 qodana.yaml delete mode 100644 skills-lock.json delete mode 100644 sync-log.txt delete mode 100644 tools/generate_placeholders.py delete mode 100644 tools/generate_responsive.py delete mode 100644 tools/png_to_webp.py delete mode 100644 tradehax-cleanup.sh delete mode 100644 tradehax-crypto-education.jsonl delete mode 100644 tradehax-domain-priority.jsonl delete mode 100644 tradehax-repo/DEPLOYMENT_SUMMARY.md delete mode 100644 tradehax-repo/WINDOWS_SETUP.md delete mode 100644 tradehax-repo/scripts/fine-tune-requirements-windows.txt delete mode 100644 tradehax-repo/scripts/install-hf-deps-windows.ps1 delete mode 100644 tradehax-training-expanded.jsonl delete mode 100644 tradehaxai.code-workspace diff --git a/.agents/skills/supabase-postgres-best-practices/AGENTS.md b/.agents/skills/supabase-postgres-best-practices/AGENTS.md deleted file mode 100644 index a7baf445..00000000 --- a/.agents/skills/supabase-postgres-best-practices/AGENTS.md +++ /dev/null @@ -1,68 +0,0 @@ -# Supabase Postgres Best Practices - -## Structure - -``` -supabase-postgres-best-practices/ - SKILL.md # Main skill file - read this first - AGENTS.md # This navigation guide - CLAUDE.md # Symlink to AGENTS.md - references/ # Detailed reference files -``` - -## Usage - -1. Read `SKILL.md` for the main skill instructions -2. Browse `references/` for detailed documentation on specific topics -3. Reference files are loaded on-demand - read only what you need - -Comprehensive performance optimization guide for Postgres, maintained by Supabase. 
Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. - -## When to Apply - -Reference these guidelines when: -- Writing SQL queries or designing schemas -- Implementing indexes or query optimization -- Reviewing database performance issues -- Configuring connection pooling or scaling -- Optimizing for Postgres-specific features -- Working with Row-Level Security (RLS) - -## Rule Categories by Priority - -| Priority | Category | Impact | Prefix | -|----------|----------|--------|--------| -| 1 | Query Performance | CRITICAL | `query-` | -| 2 | Connection Management | CRITICAL | `conn-` | -| 3 | Security & RLS | CRITICAL | `security-` | -| 4 | Schema Design | HIGH | `schema-` | -| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | -| 6 | Data Access Patterns | MEDIUM | `data-` | -| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | -| 8 | Advanced Features | LOW | `advanced-` | - -## How to Use - -Read individual rule files for detailed explanations and SQL examples: - -``` -references/query-missing-indexes.md -references/schema-partial-indexes.md -references/_sections.md -``` - -Each rule file contains: -- Brief explanation of why it matters -- Incorrect SQL example with explanation -- Correct SQL example with explanation -- Optional EXPLAIN output or metrics -- Additional context and references -- Supabase-specific notes (when applicable) - -## References - -- https://www.postgresql.org/docs/current/ -- https://supabase.com/docs -- https://wiki.postgresql.org/wiki/Performance_Optimization -- https://supabase.com/docs/guides/database/overview -- https://supabase.com/docs/guides/auth/row-level-security diff --git a/.agents/skills/supabase-postgres-best-practices/CLAUDE.md b/.agents/skills/supabase-postgres-best-practices/CLAUDE.md deleted file mode 100644 index 47dc3e3d..00000000 --- a/.agents/skills/supabase-postgres-best-practices/CLAUDE.md +++ /dev/null @@ -1 +0,0 @@ -AGENTS.md \ No newline at end of file diff --git a/.agents/skills/supabase-postgres-best-practices/README.md b/.agents/skills/supabase-postgres-best-practices/README.md deleted file mode 100644 index f1a374e1..00000000 --- a/.agents/skills/supabase-postgres-best-practices/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# Supabase Postgres Best Practices - Contributor Guide - -This skill contains Postgres performance optimization references optimized for -AI agents and LLMs. It follows the [Agent Skills Open Standard](https://agentskills.io/). - -## Quick Start - -```bash -# From repository root -npm install - -# Validate existing references -npm run validate - -# Build AGENTS.md -npm run build -``` - -## Creating a New Reference - -1. **Choose a section prefix** based on the category: - - `query-` Query Performance (CRITICAL) - - `conn-` Connection Management (CRITICAL) - - `security-` Security & RLS (CRITICAL) - - `schema-` Schema Design (HIGH) - - `lock-` Concurrency & Locking (MEDIUM-HIGH) - - `data-` Data Access Patterns (MEDIUM) - - `monitor-` Monitoring & Diagnostics (LOW-MEDIUM) - - `advanced-` Advanced Features (LOW) - -2. **Copy the template**: - ```bash - cp references/_template.md references/query-your-reference-name.md - ``` - -3. **Fill in the content** following the template structure - -4. **Validate and build**: - ```bash - npm run validate - npm run build - ``` - -5. 
**Review** the generated `AGENTS.md` - -## Skill Structure - -``` -skills/supabase-postgres-best-practices/ -β”œβ”€β”€ SKILL.md # Agent-facing skill manifest (Agent Skills spec) -β”œβ”€β”€ AGENTS.md # [GENERATED] Compiled references document -β”œβ”€β”€ README.md # This file -└── references/ - β”œβ”€β”€ _template.md # Reference template - β”œβ”€β”€ _sections.md # Section definitions - β”œβ”€β”€ _contributing.md # Writing guidelines - └── *.md # Individual references - -packages/skills-build/ -β”œβ”€β”€ src/ # Generic build system source -└── package.json # NPM scripts -``` - -## Reference File Structure - -See `references/_template.md` for the complete template. Key elements: - -````markdown ---- -title: Clear, Action-Oriented Title -impact: CRITICAL|HIGH|MEDIUM-HIGH|MEDIUM|LOW-MEDIUM|LOW -impactDescription: Quantified benefit (e.g., "10-100x faster") -tags: relevant, keywords ---- - -## [Title] - -[1-2 sentence explanation] - -**Incorrect (description):** - -```sql --- Comment explaining what's wrong -[Bad SQL example] -``` -```` - -**Correct (description):** - -```sql --- Comment explaining why this is better -[Good SQL example] -``` - -``` -## Writing Guidelines - -See `references/_contributing.md` for detailed guidelines. Key principles: - -1. **Show concrete transformations** - "Change X to Y", not abstract advice -2. **Error-first structure** - Show the problem before the solution -3. **Quantify impact** - Include specific metrics (10x faster, 50% smaller) -4. **Self-contained examples** - Complete, runnable SQL -5. **Semantic naming** - Use meaningful names (users, email), not (table1, col1) - -## Impact Levels - -| Level | Improvement | Examples | -|-------|-------------|----------| -| CRITICAL | 10-100x | Missing indexes, connection exhaustion | -| HIGH | 5-20x | Wrong index types, poor partitioning | -| MEDIUM-HIGH | 2-5x | N+1 queries, RLS optimization | -| MEDIUM | 1.5-3x | Redundant indexes, stale statistics | -| LOW-MEDIUM | 1.2-2x | VACUUM tuning, config tweaks | -| LOW | Incremental | Advanced patterns, edge cases | -``` diff --git a/.agents/skills/supabase-postgres-best-practices/SKILL.md b/.agents/skills/supabase-postgres-best-practices/SKILL.md deleted file mode 100644 index f80be156..00000000 --- a/.agents/skills/supabase-postgres-best-practices/SKILL.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -name: supabase-postgres-best-practices -description: Postgres performance optimization and best practices from Supabase. Use this skill when writing, reviewing, or optimizing Postgres queries, schema designs, or database configurations. -license: MIT -metadata: - author: supabase - version: "1.1.0" - organization: Supabase - date: January 2026 - abstract: Comprehensive Postgres performance optimization guide for developers using Supabase and Postgres. Contains performance rules across 8 categories, prioritized by impact from critical (query performance, connection management) to incremental (advanced features). Each rule includes detailed explanations, incorrect vs. correct SQL examples, query plan analysis, and specific performance metrics to guide automated optimization and code generation. ---- - -# Supabase Postgres Best Practices - -Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. 
- -## When to Apply - -Reference these guidelines when: -- Writing SQL queries or designing schemas -- Implementing indexes or query optimization -- Reviewing database performance issues -- Configuring connection pooling or scaling -- Optimizing for Postgres-specific features -- Working with Row-Level Security (RLS) - -## Rule Categories by Priority - -| Priority | Category | Impact | Prefix | -|----------|----------|--------|--------| -| 1 | Query Performance | CRITICAL | `query-` | -| 2 | Connection Management | CRITICAL | `conn-` | -| 3 | Security & RLS | CRITICAL | `security-` | -| 4 | Schema Design | HIGH | `schema-` | -| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | -| 6 | Data Access Patterns | MEDIUM | `data-` | -| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | -| 8 | Advanced Features | LOW | `advanced-` | - -## How to Use - -Read individual rule files for detailed explanations and SQL examples: - -``` -references/query-missing-indexes.md -references/schema-partial-indexes.md -references/_sections.md -``` - -Each rule file contains: -- Brief explanation of why it matters -- Incorrect SQL example with explanation -- Correct SQL example with explanation -- Optional EXPLAIN output or metrics -- Additional context and references -- Supabase-specific notes (when applicable) - -## References - -- https://www.postgresql.org/docs/current/ -- https://supabase.com/docs -- https://wiki.postgresql.org/wiki/Performance_Optimization -- https://supabase.com/docs/guides/database/overview -- https://supabase.com/docs/guides/auth/row-level-security diff --git a/.agents/skills/supabase-postgres-best-practices/references/_contributing.md b/.agents/skills/supabase-postgres-best-practices/references/_contributing.md deleted file mode 100644 index 10de8ecb..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/_contributing.md +++ /dev/null @@ -1,171 +0,0 @@ -# Writing Guidelines for Postgres References - -This document provides guidelines for creating effective Postgres best -practice references that work well with AI agents and LLMs. - -## Key Principles - -### 1. Concrete Transformation Patterns - -Show exact SQL rewrites. Avoid philosophical advice. - -**Good:** "Use `WHERE id = ANY(ARRAY[...])` instead of -`WHERE id IN (SELECT ...)`" **Bad:** "Design good schemas" - -### 2. Error-First Structure - -Always show the problematic pattern first, then the solution. This trains agents -to recognize anti-patterns. - -```markdown -**Incorrect (sequential queries):** [bad example] - -**Correct (batched query):** [good example] -``` - -### 3. Quantified Impact - -Include specific metrics. Helps agents prioritize fixes. - -**Good:** "10x faster queries", "50% smaller index", "Eliminates N+1" -**Bad:** "Faster", "Better", "More efficient" - -### 4. Self-Contained Examples - -Examples should be complete and runnable (or close to it). Include `CREATE TABLE` -if context is needed. - -```sql --- Include table definition when needed for clarity -CREATE TABLE users ( - id bigint PRIMARY KEY, - email text NOT NULL, - deleted_at timestamptz -); - --- Now show the index -CREATE INDEX users_active_email_idx ON users(email) WHERE deleted_at IS NULL; -``` - -### 5. Semantic Naming - -Use meaningful table/column names. Names carry intent for LLMs. 
- -**Good:** `users`, `email`, `created_at`, `is_active` -**Bad:** `table1`, `col1`, `field`, `flag` - ---- - -## Code Example Standards - -### SQL Formatting - -```sql --- Use lowercase keywords, clear formatting -CREATE INDEX CONCURRENTLY users_email_idx - ON users(email) - WHERE deleted_at IS NULL; - --- Not cramped or ALL CAPS -CREATE INDEX CONCURRENTLY USERS_EMAIL_IDX ON USERS(EMAIL) WHERE DELETED_AT IS NULL; -``` - -### Comments - -- Explain _why_, not _what_ -- Highlight performance implications -- Point out common pitfalls - -### Language Tags - -- `sql` - Standard SQL queries -- `plpgsql` - Stored procedures/functions -- `typescript` - Application code (when needed) -- `python` - Application code (when needed) - ---- - -## When to Include Application Code - -**Default: SQL Only** - -Most references should focus on pure SQL patterns. This keeps examples portable. - -**Include Application Code When:** - -- Connection pooling configuration -- Transaction management in application context -- ORM anti-patterns (N+1 in Prisma/TypeORM) -- Prepared statement usage - -**Format for Mixed Examples:** - -````markdown -**Incorrect (N+1 in application):** - -```typescript -for (const user of users) { - const posts = await db.query("SELECT * FROM posts WHERE user_id = $1", [ - user.id, - ]); -} -``` -```` - -**Correct (batch query):** - -```typescript -const posts = await db.query("SELECT * FROM posts WHERE user_id = ANY($1)", [ - userIds, -]); -``` - ---- - -## Impact Level Guidelines - -| Level | Improvement | Use When | -|-------|-------------|----------| -| **CRITICAL** | 10-100x | Missing indexes, connection exhaustion, sequential scans on large tables | -| **HIGH** | 5-20x | Wrong index types, poor partitioning, missing covering indexes | -| **MEDIUM-HIGH** | 2-5x | N+1 queries, inefficient pagination, RLS optimization | -| **MEDIUM** | 1.5-3x | Redundant indexes, query plan instability | -| **LOW-MEDIUM** | 1.2-2x | VACUUM tuning, configuration tweaks | -| **LOW** | Incremental | Advanced patterns, edge cases | - ---- - -## Reference Standards - -**Primary Sources:** - -- Official Postgres documentation -- Supabase documentation -- Postgres wiki -- Established blogs (2ndQuadrant, Crunchy Data) - -**Format:** - -```markdown -Reference: -[Postgres Indexes](https://www.postgresql.org/docs/current/indexes.html) -``` - ---- - -## Review Checklist - -Before submitting a reference: - -- [ ] Title is clear and action-oriented -- [ ] Impact level matches the performance gain -- [ ] impactDescription includes quantification -- [ ] Explanation is concise (1-2 sentences) -- [ ] Has at least 1 **Incorrect** SQL example -- [ ] Has at least 1 **Correct** SQL example -- [ ] SQL uses semantic naming -- [ ] Comments explain _why_, not _what_ -- [ ] Trade-offs mentioned if applicable -- [ ] Reference links included -- [ ] `npm run validate` passes -- [ ] `npm run build` generates correct output diff --git a/.agents/skills/supabase-postgres-best-practices/references/_sections.md b/.agents/skills/supabase-postgres-best-practices/references/_sections.md deleted file mode 100644 index 8ba57c23..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/_sections.md +++ /dev/null @@ -1,39 +0,0 @@ -# Section Definitions - -This file defines the rule categories for Postgres best practices. Rules are automatically assigned to sections based on their filename prefix. - -Take the examples below as pure demonstrative. Replace each section with the actual rule categories for Postgres best practices. 
- ---- - -## 1. Query Performance (query) -**Impact:** CRITICAL -**Description:** Slow queries, missing indexes, inefficient query plans. The most common source of Postgres performance issues. - -## 2. Connection Management (conn) -**Impact:** CRITICAL -**Description:** Connection pooling, limits, and serverless strategies. Critical for applications with high concurrency or serverless deployments. - -## 3. Security & RLS (security) -**Impact:** CRITICAL -**Description:** Row-Level Security policies, privilege management, and authentication patterns. - -## 4. Schema Design (schema) -**Impact:** HIGH -**Description:** Table design, index strategies, partitioning, and data type selection. Foundation for long-term performance. - -## 5. Concurrency & Locking (lock) -**Impact:** MEDIUM-HIGH -**Description:** Transaction management, isolation levels, deadlock prevention, and lock contention patterns. - -## 6. Data Access Patterns (data) -**Impact:** MEDIUM -**Description:** N+1 query elimination, batch operations, cursor-based pagination, and efficient data fetching. - -## 7. Monitoring & Diagnostics (monitor) -**Impact:** LOW-MEDIUM -**Description:** Using pg_stat_statements, EXPLAIN ANALYZE, metrics collection, and performance diagnostics. - -## 8. Advanced Features (advanced) -**Impact:** LOW -**Description:** Full-text search, JSONB optimization, PostGIS, extensions, and advanced Postgres features. diff --git a/.agents/skills/supabase-postgres-best-practices/references/_template.md b/.agents/skills/supabase-postgres-best-practices/references/_template.md deleted file mode 100644 index 91ace90e..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/_template.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Clear, Action-Oriented Title (e.g., "Use Partial Indexes for Filtered Queries") -impact: MEDIUM -impactDescription: 5-20x query speedup for filtered queries -tags: indexes, query-optimization, performance ---- - -## [Rule Title] - -[1-2 sentence explanation of the problem and why it matters. Focus on performance impact.] - -**Incorrect (describe the problem):** - -```sql --- Comment explaining what makes this slow/problematic -CREATE INDEX users_email_idx ON users(email); - -SELECT * FROM users WHERE email = 'user@example.com' AND deleted_at IS NULL; --- This scans deleted records unnecessarily -``` - -**Correct (describe the solution):** - -```sql --- Comment explaining why this is better -CREATE INDEX users_active_email_idx ON users(email) WHERE deleted_at IS NULL; - -SELECT * FROM users WHERE email = 'user@example.com' AND deleted_at IS NULL; --- Only indexes active users, 10x smaller index, faster queries -``` - -[Optional: Additional context, edge cases, or trade-offs] - -Reference: [Postgres Docs](https://www.postgresql.org/docs/current/) diff --git a/.agents/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md b/.agents/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md deleted file mode 100644 index 582cbeaa..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Use tsvector for Full-Text Search -impact: MEDIUM -impactDescription: 100x faster than LIKE, with ranking support -tags: full-text-search, tsvector, gin, search ---- - -## Use tsvector for Full-Text Search - -LIKE with wildcards can't use indexes. Full-text search with tsvector is orders of magnitude faster. 
- -**Incorrect (LIKE pattern matching):** - -```sql --- Cannot use index, scans all rows -select * from articles where content like '%postgresql%'; - --- Case-insensitive makes it worse -select * from articles where lower(content) like '%postgresql%'; -``` - -**Correct (full-text search with tsvector):** - -```sql --- Add tsvector column and index -alter table articles add column search_vector tsvector - generated always as (to_tsvector('english', coalesce(title,'') || ' ' || coalesce(content,''))) stored; - -create index articles_search_idx on articles using gin (search_vector); - --- Fast full-text search -select * from articles -where search_vector @@ to_tsquery('english', 'postgresql & performance'); - --- With ranking -select *, ts_rank(search_vector, query) as rank -from articles, to_tsquery('english', 'postgresql') query -where search_vector @@ query -order by rank desc; -``` - -Search multiple terms: - -```sql --- AND: both terms required -to_tsquery('postgresql & performance') - --- OR: either term -to_tsquery('postgresql | mysql') - --- Prefix matching -to_tsquery('post:*') -``` - -Reference: [Full Text Search](https://supabase.com/docs/guides/database/full-text-search) diff --git a/.agents/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md b/.agents/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md deleted file mode 100644 index e3d261ea..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Index JSONB Columns for Efficient Querying -impact: MEDIUM -impactDescription: 10-100x faster JSONB queries with proper indexing -tags: jsonb, gin, indexes, json ---- - -## Index JSONB Columns for Efficient Querying - -JSONB queries without indexes scan the entire table. Use GIN indexes for containment queries. 
- -**Incorrect (no index on JSONB):** - -```sql -create table products ( - id bigint primary key, - attributes jsonb -); - --- Full table scan for every query -select * from products where attributes @> '{"color": "red"}'; -select * from products where attributes->>'brand' = 'Nike'; -``` - -**Correct (GIN index for JSONB):** - -```sql --- GIN index for containment operators (@>, ?, ?&, ?|) -create index products_attrs_gin on products using gin (attributes); - --- Now containment queries use the index -select * from products where attributes @> '{"color": "red"}'; - --- For specific key lookups, use expression index -create index products_brand_idx on products ((attributes->>'brand')); -select * from products where attributes->>'brand' = 'Nike'; -``` - -Choose the right operator class: - -```sql --- jsonb_ops (default): supports all operators, larger index -create index idx1 on products using gin (attributes); - --- jsonb_path_ops: only @> operator, but 2-3x smaller index -create index idx2 on products using gin (attributes jsonb_path_ops); -``` - -Reference: [JSONB Indexes](https://www.postgresql.org/docs/current/datatype-json.html#JSON-INDEXING) diff --git a/.agents/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md b/.agents/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md deleted file mode 100644 index 40b9cc50..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Configure Idle Connection Timeouts -impact: HIGH -impactDescription: Reclaim 30-50% of connection slots from idle clients -tags: connections, timeout, idle, resource-management ---- - -## Configure Idle Connection Timeouts - -Idle connections waste resources. Configure timeouts to automatically reclaim them. - -**Incorrect (connections held indefinitely):** - -```sql --- No timeout configured -show idle_in_transaction_session_timeout; -- 0 (disabled) - --- Connections stay open forever, even when idle -select pid, state, state_change, query -from pg_stat_activity -where state = 'idle in transaction'; --- Shows transactions idle for hours, holding locks -``` - -**Correct (automatic cleanup of idle connections):** - -```sql --- Terminate connections idle in transaction after 30 seconds -alter system set idle_in_transaction_session_timeout = '30s'; - --- Terminate completely idle connections after 10 minutes -alter system set idle_session_timeout = '10min'; - --- Reload configuration -select pg_reload_conf(); -``` - -For pooled connections, configure at the pooler level: - -```ini -# pgbouncer.ini -server_idle_timeout = 60 -client_idle_timeout = 300 -``` - -Reference: [Connection Timeouts](https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT) diff --git a/.agents/skills/supabase-postgres-best-practices/references/conn-limits.md b/.agents/skills/supabase-postgres-best-practices/references/conn-limits.md deleted file mode 100644 index cb3e400c..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/conn-limits.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Set Appropriate Connection Limits -impact: CRITICAL -impactDescription: Prevent database crashes and memory exhaustion -tags: connections, max-connections, limits, stability ---- - -## Set Appropriate Connection Limits - -Too many connections exhaust memory and degrade performance. Set limits based on available resources. 
- -**Incorrect (unlimited or excessive connections):** - -```sql --- Default max_connections = 100, but often increased blindly -show max_connections; -- 500 (way too high for 4GB RAM) - --- Each connection uses 1-3MB RAM --- 500 connections * 2MB = 1GB just for connections! --- Out of memory errors under load -``` - -**Correct (calculate based on resources):** - -```sql --- Formula: max_connections = (RAM in MB / 5MB per connection) - reserved --- For 4GB RAM: (4096 / 5) - 10 = ~800 theoretical max --- But practically, 100-200 is better for query performance - --- Recommended settings for 4GB RAM -alter system set max_connections = 100; - --- Also set work_mem appropriately --- work_mem * max_connections should not exceed 25% of RAM -alter system set work_mem = '8MB'; -- 8MB * 100 = 800MB max -``` - -Monitor connection usage: - -```sql -select count(*), state from pg_stat_activity group by state; -``` - -Reference: [Database Connections](https://supabase.com/docs/guides/platform/performance#connection-management) diff --git a/.agents/skills/supabase-postgres-best-practices/references/conn-pooling.md b/.agents/skills/supabase-postgres-best-practices/references/conn-pooling.md deleted file mode 100644 index e2ebd581..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/conn-pooling.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Use Connection Pooling for All Applications -impact: CRITICAL -impactDescription: Handle 10-100x more concurrent users -tags: connection-pooling, pgbouncer, performance, scalability ---- - -## Use Connection Pooling for All Applications - -Postgres connections are expensive (1-3MB RAM each). Without pooling, applications exhaust connections under load. - -**Incorrect (new connection per request):** - -```sql --- Each request creates a new connection --- Application code: db.connect() per request --- Result: 500 concurrent users = 500 connections = crashed database - --- Check current connections -select count(*) from pg_stat_activity; -- 487 connections! -``` - -**Correct (connection pooling):** - -```sql --- Use a pooler like PgBouncer between app and database --- Application connects to pooler, pooler reuses a small pool to Postgres - --- Configure pool_size based on: (CPU cores * 2) + spindle_count --- Example for 4 cores: pool_size = 10 - --- Result: 500 concurrent users share 10 actual connections -select count(*) from pg_stat_activity; -- 10 connections -``` - -Pool modes: - -- **Transaction mode**: connection returned after each transaction (best for most apps) -- **Session mode**: connection held for entire session (needed for prepared statements, temp tables) - -Reference: [Connection Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pooler) diff --git a/.agents/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md b/.agents/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md deleted file mode 100644 index 555547d8..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Use Prepared Statements Correctly with Pooling -impact: HIGH -impactDescription: Avoid prepared statement conflicts in pooled environments -tags: prepared-statements, connection-pooling, transaction-mode ---- - -## Use Prepared Statements Correctly with Pooling - -Prepared statements are tied to individual database connections. 
In transaction-mode pooling, connections are shared, causing conflicts. - -**Incorrect (named prepared statements with transaction pooling):** - -```sql --- Named prepared statement -prepare get_user as select * from users where id = $1; - --- In transaction mode pooling, next request may get different connection -execute get_user(123); --- ERROR: prepared statement "get_user" does not exist -``` - -**Correct (use unnamed statements or session mode):** - -```sql --- Option 1: Use unnamed prepared statements (most ORMs do this automatically) --- The query is prepared and executed in a single protocol message - --- Option 2: Deallocate after use in transaction mode -prepare get_user as select * from users where id = $1; -execute get_user(123); -deallocate get_user; - --- Option 3: Use session mode pooling (port 5432 vs 6543) --- Connection is held for entire session, prepared statements persist -``` - -Check your driver settings: - -```sql --- Many drivers use prepared statements by default --- Node.js pg: { prepare: false } to disable --- JDBC: prepareThreshold=0 to disable -``` - -Reference: [Prepared Statements with Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pool-modes) diff --git a/.agents/skills/supabase-postgres-best-practices/references/data-batch-inserts.md b/.agents/skills/supabase-postgres-best-practices/references/data-batch-inserts.md deleted file mode 100644 index 997947cb..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/data-batch-inserts.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Batch INSERT Statements for Bulk Data -impact: MEDIUM -impactDescription: 10-50x faster bulk inserts -tags: batch, insert, bulk, performance, copy ---- - -## Batch INSERT Statements for Bulk Data - -Individual INSERT statements have high overhead. Batch multiple rows in single statements or use COPY. - -**Incorrect (individual inserts):** - -```sql --- Each insert is a separate transaction and round trip -insert into events (user_id, action) values (1, 'click'); -insert into events (user_id, action) values (1, 'view'); -insert into events (user_id, action) values (2, 'click'); --- ... 1000 more individual inserts - --- 1000 inserts = 1000 round trips = slow -``` - -**Correct (batch insert):** - -```sql --- Multiple rows in single statement -insert into events (user_id, action) values - (1, 'click'), - (1, 'view'), - (2, 'click'), - -- ... up to ~1000 rows per batch - (999, 'view'); - --- One round trip for 1000 rows -``` - -For large imports, use COPY: - -```sql --- COPY is fastest for bulk loading -copy events (user_id, action, created_at) -from '/path/to/data.csv' -with (format csv, header true); - --- Or from stdin in application -copy events (user_id, action) from stdin with (format csv); -1,click -1,view -2,click -\. -``` - -Reference: [COPY](https://www.postgresql.org/docs/current/sql-copy.html) diff --git a/.agents/skills/supabase-postgres-best-practices/references/data-n-plus-one.md b/.agents/skills/supabase-postgres-best-practices/references/data-n-plus-one.md deleted file mode 100644 index 2109186f..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/data-n-plus-one.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Eliminate N+1 Queries with Batch Loading -impact: MEDIUM-HIGH -impactDescription: 10-100x fewer database round trips -tags: n-plus-one, batch, performance, queries ---- - -## Eliminate N+1 Queries with Batch Loading - -N+1 queries execute one query per item in a loop. 
Batch them into a single query using arrays or JOINs. - -**Incorrect (N+1 queries):** - -```sql --- First query: get all users -select id from users where active = true; -- Returns 100 IDs - --- Then N queries, one per user -select * from orders where user_id = 1; -select * from orders where user_id = 2; -select * from orders where user_id = 3; --- ... 97 more queries! - --- Total: 101 round trips to database -``` - -**Correct (single batch query):** - -```sql --- Collect IDs and query once with ANY -select * from orders where user_id = any(array[1, 2, 3, ...]); - --- Or use JOIN instead of loop -select u.id, u.name, o.* -from users u -left join orders o on o.user_id = u.id -where u.active = true; - --- Total: 1 round trip -``` - -Application pattern: - -```sql --- Instead of looping in application code: --- for user in users: db.query("SELECT * FROM orders WHERE user_id = $1", user.id) - --- Pass array parameter: -select * from orders where user_id = any($1::bigint[]); --- Application passes: [1, 2, 3, 4, 5, ...] -``` - -Reference: [N+1 Query Problem](https://supabase.com/docs/guides/database/query-optimization) diff --git a/.agents/skills/supabase-postgres-best-practices/references/data-pagination.md b/.agents/skills/supabase-postgres-best-practices/references/data-pagination.md deleted file mode 100644 index 633d8393..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/data-pagination.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Use Cursor-Based Pagination Instead of OFFSET -impact: MEDIUM-HIGH -impactDescription: Consistent O(1) performance regardless of page depth -tags: pagination, cursor, keyset, offset, performance ---- - -## Use Cursor-Based Pagination Instead of OFFSET - -OFFSET-based pagination scans all skipped rows, getting slower on deeper pages. Cursor pagination is O(1). - -**Incorrect (OFFSET pagination):** - -```sql --- Page 1: scans 20 rows -select * from products order by id limit 20 offset 0; - --- Page 100: scans 2000 rows to skip 1980 -select * from products order by id limit 20 offset 1980; - --- Page 10000: scans 200,000 rows! -select * from products order by id limit 20 offset 199980; -``` - -**Correct (cursor/keyset pagination):** - -```sql --- Page 1: get first 20 -select * from products order by id limit 20; --- Application stores last_id = 20 - --- Page 2: start after last ID -select * from products where id > 20 order by id limit 20; --- Uses index, always fast regardless of page depth - --- Page 10000: same speed as page 1 -select * from products where id > 199980 order by id limit 20; -``` - -For multi-column sorting: - -```sql --- Cursor must include all sort columns -select * from products -where (created_at, id) > ('2024-01-15 10:00:00', 12345) -order by created_at, id -limit 20; -``` - -Reference: [Pagination](https://supabase.com/docs/guides/database/pagination) diff --git a/.agents/skills/supabase-postgres-best-practices/references/data-upsert.md b/.agents/skills/supabase-postgres-best-practices/references/data-upsert.md deleted file mode 100644 index bc95e230..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/data-upsert.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Use UPSERT for Insert-or-Update Operations -impact: MEDIUM -impactDescription: Atomic operation, eliminates race conditions -tags: upsert, on-conflict, insert, update ---- - -## Use UPSERT for Insert-or-Update Operations - -Using separate SELECT-then-INSERT/UPDATE creates race conditions. Use INSERT ... ON CONFLICT for atomic upserts. 
- -**Incorrect (check-then-insert race condition):** - -```sql --- Race condition: two requests check simultaneously -select * from settings where user_id = 123 and key = 'theme'; --- Both find nothing - --- Both try to insert -insert into settings (user_id, key, value) values (123, 'theme', 'dark'); --- One succeeds, one fails with duplicate key error! -``` - -**Correct (atomic UPSERT):** - -```sql --- Single atomic operation -insert into settings (user_id, key, value) -values (123, 'theme', 'dark') -on conflict (user_id, key) -do update set value = excluded.value, updated_at = now(); - --- Returns the inserted/updated row -insert into settings (user_id, key, value) -values (123, 'theme', 'dark') -on conflict (user_id, key) -do update set value = excluded.value -returning *; -``` - -Insert-or-ignore pattern: - -```sql --- Insert only if not exists (no update) -insert into page_views (page_id, user_id) -values (1, 123) -on conflict (page_id, user_id) do nothing; -``` - -Reference: [INSERT ON CONFLICT](https://www.postgresql.org/docs/current/sql-insert.html#SQL-ON-CONFLICT) diff --git a/.agents/skills/supabase-postgres-best-practices/references/lock-advisory.md b/.agents/skills/supabase-postgres-best-practices/references/lock-advisory.md deleted file mode 100644 index 572eaf0d..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/lock-advisory.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Use Advisory Locks for Application-Level Locking -impact: MEDIUM -impactDescription: Efficient coordination without row-level lock overhead -tags: advisory-locks, coordination, application-locks ---- - -## Use Advisory Locks for Application-Level Locking - -Advisory locks provide application-level coordination without requiring database rows to lock. - -**Incorrect (creating rows just for locking):** - -```sql --- Creating dummy rows to lock on -create table resource_locks ( - resource_name text primary key -); - -insert into resource_locks values ('report_generator'); - --- Lock by selecting the row -select * from resource_locks where resource_name = 'report_generator' for update; -``` - -**Correct (advisory locks):** - -```sql --- Session-level advisory lock (released on disconnect or unlock) -select pg_advisory_lock(hashtext('report_generator')); --- ... do exclusive work ... -select pg_advisory_unlock(hashtext('report_generator')); - --- Transaction-level lock (released on commit/rollback) -begin; -select pg_advisory_xact_lock(hashtext('daily_report')); --- ... do work ... 
-commit; -- Lock automatically released -``` - -Try-lock for non-blocking operations: - -```sql --- Returns immediately with true/false instead of waiting -select pg_try_advisory_lock(hashtext('resource_name')); - --- Use in application -if (acquired) { - -- Do work - select pg_advisory_unlock(hashtext('resource_name')); -} else { - -- Skip or retry later -} -``` - -Reference: [Advisory Locks](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS) diff --git a/.agents/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md b/.agents/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md deleted file mode 100644 index 974da5ed..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Prevent Deadlocks with Consistent Lock Ordering -impact: MEDIUM-HIGH -impactDescription: Eliminate deadlock errors, improve reliability -tags: deadlocks, locking, transactions, ordering ---- - -## Prevent Deadlocks with Consistent Lock Ordering - -Deadlocks occur when transactions lock resources in different orders. Always -acquire locks in a consistent order. - -**Incorrect (inconsistent lock ordering):** - -```sql --- Transaction A -- Transaction B -begin; begin; -update accounts update accounts -set balance = balance - 100 set balance = balance - 50 -where id = 1; where id = 2; -- B locks row 2 - -update accounts update accounts -set balance = balance + 100 set balance = balance + 50 -where id = 2; -- A waits for B where id = 1; -- B waits for A - --- DEADLOCK! Both waiting for each other -``` - -**Correct (lock rows in consistent order first):** - -```sql --- Explicitly acquire locks in ID order before updating -begin; -select * from accounts where id in (1, 2) order by id for update; - --- Now perform updates in any order - locks already held -update accounts set balance = balance - 100 where id = 1; -update accounts set balance = balance + 100 where id = 2; -commit; -``` - -Alternative: use a single statement to update atomically: - -```sql --- Single statement acquires all locks atomically -begin; -update accounts -set balance = balance + case id - when 1 then -100 - when 2 then 100 -end -where id in (1, 2); -commit; -``` - -Detect deadlocks in logs: - -```sql --- Check for recent deadlocks -select * from pg_stat_database where deadlocks > 0; - --- Enable deadlock logging -set log_lock_waits = on; -set deadlock_timeout = '1s'; -``` - -Reference: -[Deadlocks](https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-DEADLOCKS) diff --git a/.agents/skills/supabase-postgres-best-practices/references/lock-short-transactions.md b/.agents/skills/supabase-postgres-best-practices/references/lock-short-transactions.md deleted file mode 100644 index e6b8ef26..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/lock-short-transactions.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Keep Transactions Short to Reduce Lock Contention -impact: MEDIUM-HIGH -impactDescription: 3-5x throughput improvement, fewer deadlocks -tags: transactions, locking, contention, performance ---- - -## Keep Transactions Short to Reduce Lock Contention - -Long-running transactions hold locks that block other queries. Keep transactions as short as possible. 
- -**Incorrect (long transaction with external calls):** - -```sql -begin; -select * from orders where id = 1 for update; -- Lock acquired - --- Application makes HTTP call to payment API (2-5 seconds) --- Other queries on this row are blocked! - -update orders set status = 'paid' where id = 1; -commit; -- Lock held for entire duration -``` - -**Correct (minimal transaction scope):** - -```sql --- Validate data and call APIs outside transaction --- Application: response = await paymentAPI.charge(...) - --- Only hold lock for the actual update -begin; -update orders -set status = 'paid', payment_id = $1 -where id = $2 and status = 'pending' -returning *; -commit; -- Lock held for milliseconds -``` - -Use `statement_timeout` to prevent runaway transactions: - -```sql --- Abort queries running longer than 30 seconds -set statement_timeout = '30s'; - --- Or per-session -set local statement_timeout = '5s'; -``` - -Reference: [Transaction Management](https://www.postgresql.org/docs/current/tutorial-transactions.html) diff --git a/.agents/skills/supabase-postgres-best-practices/references/lock-skip-locked.md b/.agents/skills/supabase-postgres-best-practices/references/lock-skip-locked.md deleted file mode 100644 index 77bdbb97..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/lock-skip-locked.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Use SKIP LOCKED for Non-Blocking Queue Processing -impact: MEDIUM-HIGH -impactDescription: 10x throughput for worker queues -tags: skip-locked, queue, workers, concurrency ---- - -## Use SKIP LOCKED for Non-Blocking Queue Processing - -When multiple workers process a queue, SKIP LOCKED allows workers to process different rows without waiting. - -**Incorrect (workers block each other):** - -```sql --- Worker 1 and Worker 2 both try to get next job -begin; -select * from jobs where status = 'pending' order by created_at limit 1 for update; --- Worker 2 waits for Worker 1's lock to release! -``` - -**Correct (SKIP LOCKED for parallel processing):** - -```sql --- Each worker skips locked rows and gets the next available -begin; -select * from jobs -where status = 'pending' -order by created_at -limit 1 -for update skip locked; - --- Worker 1 gets job 1, Worker 2 gets job 2 (no waiting) - -update jobs set status = 'processing' where id = $1; -commit; -``` - -Complete queue pattern: - -```sql --- Atomic claim-and-update in one statement -update jobs -set status = 'processing', worker_id = $1, started_at = now() -where id = ( - select id from jobs - where status = 'pending' - order by created_at - limit 1 - for update skip locked -) -returning *; -``` - -Reference: [SELECT FOR UPDATE SKIP LOCKED](https://www.postgresql.org/docs/current/sql-select.html#SQL-FOR-UPDATE-SHARE) diff --git a/.agents/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md b/.agents/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md deleted file mode 100644 index 542978c3..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Use EXPLAIN ANALYZE to Diagnose Slow Queries -impact: LOW-MEDIUM -impactDescription: Identify exact bottlenecks in query execution -tags: explain, analyze, diagnostics, query-plan ---- - -## Use EXPLAIN ANALYZE to Diagnose Slow Queries - -EXPLAIN ANALYZE executes the query and shows actual timings, revealing the true performance bottlenecks. 
- -**Incorrect (guessing at performance issues):** - -```sql --- Query is slow, but why? -select * from orders where customer_id = 123 and status = 'pending'; --- "It must be missing an index" - but which one? -``` - -**Correct (use EXPLAIN ANALYZE):** - -```sql -explain (analyze, buffers, format text) -select * from orders where customer_id = 123 and status = 'pending'; - --- Output reveals the issue: --- Seq Scan on orders (cost=0.00..25000.00 rows=50 width=100) (actual time=0.015..450.123 rows=50 loops=1) --- Filter: ((customer_id = 123) AND (status = 'pending'::text)) --- Rows Removed by Filter: 999950 --- Buffers: shared hit=5000 read=15000 --- Planning Time: 0.150 ms --- Execution Time: 450.500 ms -``` - -Key things to look for: - -```sql --- Seq Scan on large tables = missing index --- Rows Removed by Filter = poor selectivity or missing index --- Buffers: read >> hit = data not cached, needs more memory --- Nested Loop with high loops = consider different join strategy --- Sort Method: external merge = work_mem too low -``` - -Reference: [EXPLAIN](https://supabase.com/docs/guides/database/inspect) diff --git a/.agents/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md b/.agents/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md deleted file mode 100644 index d7e82f1a..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Enable pg_stat_statements for Query Analysis -impact: LOW-MEDIUM -impactDescription: Identify top resource-consuming queries -tags: pg-stat-statements, monitoring, statistics, performance ---- - -## Enable pg_stat_statements for Query Analysis - -pg_stat_statements tracks execution statistics for all queries, helping identify slow and frequent queries. - -**Incorrect (no visibility into query patterns):** - -```sql --- Database is slow, but which queries are the problem? 
--- No way to know without pg_stat_statements -``` - -**Correct (enable and query pg_stat_statements):** - -```sql --- Enable the extension -create extension if not exists pg_stat_statements; - --- Find slowest queries by total time -select - calls, - round(total_exec_time::numeric, 2) as total_time_ms, - round(mean_exec_time::numeric, 2) as mean_time_ms, - query -from pg_stat_statements -order by total_exec_time desc -limit 10; - --- Find most frequent queries -select calls, query -from pg_stat_statements -order by calls desc -limit 10; - --- Reset statistics after optimization -select pg_stat_statements_reset(); -``` - -Key metrics to monitor: - -```sql --- Queries with high mean time (candidates for optimization) -select query, mean_exec_time, calls -from pg_stat_statements -where mean_exec_time > 100 -- > 100ms average -order by mean_exec_time desc; -``` - -Reference: [pg_stat_statements](https://supabase.com/docs/guides/database/extensions/pg_stat_statements) diff --git a/.agents/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md b/.agents/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md deleted file mode 100644 index e0e8ea0b..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Maintain Table Statistics with VACUUM and ANALYZE -impact: MEDIUM -impactDescription: 2-10x better query plans with accurate statistics -tags: vacuum, analyze, statistics, maintenance, autovacuum ---- - -## Maintain Table Statistics with VACUUM and ANALYZE - -Outdated statistics cause the query planner to make poor decisions. VACUUM reclaims space, ANALYZE updates statistics. - -**Incorrect (stale statistics):** - -```sql --- Table has 1M rows but stats say 1000 --- Query planner chooses wrong strategy -explain select * from orders where status = 'pending'; --- Shows: Seq Scan (because stats show small table) --- Actually: Index Scan would be much faster -``` - -**Correct (maintain fresh statistics):** - -```sql --- Manually analyze after large data changes -analyze orders; - --- Analyze specific columns used in WHERE clauses -analyze orders (status, created_at); - --- Check when tables were last analyzed -select - relname, - last_vacuum, - last_autovacuum, - last_analyze, - last_autoanalyze -from pg_stat_user_tables -order by last_analyze nulls first; -``` - -Autovacuum tuning for busy tables: - -```sql --- Increase frequency for high-churn tables -alter table orders set ( - autovacuum_vacuum_scale_factor = 0.05, -- Vacuum at 5% dead tuples (default 20%) - autovacuum_analyze_scale_factor = 0.02 -- Analyze at 2% changes (default 10%) -); - --- Check autovacuum status -select * from pg_stat_progress_vacuum; -``` - -Reference: [VACUUM](https://supabase.com/docs/guides/database/database-size#vacuum-operations) diff --git a/.agents/skills/supabase-postgres-best-practices/references/query-composite-indexes.md b/.agents/skills/supabase-postgres-best-practices/references/query-composite-indexes.md deleted file mode 100644 index fea64523..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/query-composite-indexes.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Create Composite Indexes for Multi-Column Queries -impact: HIGH -impactDescription: 5-10x faster multi-column queries -tags: indexes, composite-index, multi-column, query-optimization ---- - -## Create Composite Indexes for Multi-Column Queries - -When queries filter on multiple columns, a 
composite index is more efficient than separate single-column indexes. - -**Incorrect (separate indexes require bitmap scan):** - -```sql --- Two separate indexes -create index orders_status_idx on orders (status); -create index orders_created_idx on orders (created_at); - --- Query must combine both indexes (slower) -select * from orders where status = 'pending' and created_at > '2024-01-01'; -``` - -**Correct (composite index):** - -```sql --- Single composite index (leftmost column first for equality checks) -create index orders_status_created_idx on orders (status, created_at); - --- Query uses one efficient index scan -select * from orders where status = 'pending' and created_at > '2024-01-01'; -``` - -**Column order matters** - place equality columns first, range columns last: - -```sql --- Good: status (=) before created_at (>) -create index idx on orders (status, created_at); - --- Works for: WHERE status = 'pending' --- Works for: WHERE status = 'pending' AND created_at > '2024-01-01' --- Does NOT work for: WHERE created_at > '2024-01-01' (leftmost prefix rule) -``` - -Reference: [Multicolumn Indexes](https://www.postgresql.org/docs/current/indexes-multicolumn.html) diff --git a/.agents/skills/supabase-postgres-best-practices/references/query-covering-indexes.md b/.agents/skills/supabase-postgres-best-practices/references/query-covering-indexes.md deleted file mode 100644 index 9d2a4947..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/query-covering-indexes.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Use Covering Indexes to Avoid Table Lookups -impact: MEDIUM-HIGH -impactDescription: 2-5x faster queries by eliminating heap fetches -tags: indexes, covering-index, include, index-only-scan ---- - -## Use Covering Indexes to Avoid Table Lookups - -Covering indexes include all columns needed by a query, enabling index-only scans that skip the table entirely. - -**Incorrect (index scan + heap fetch):** - -```sql -create index users_email_idx on users (email); - --- Must fetch name and created_at from table heap -select email, name, created_at from users where email = 'user@example.com'; -``` - -**Correct (index-only scan with INCLUDE):** - -```sql --- Include non-searchable columns in the index -create index users_email_idx on users (email) include (name, created_at); - --- All columns served from index, no table access needed -select email, name, created_at from users where email = 'user@example.com'; -``` - -Use INCLUDE for columns you SELECT but don't filter on: - -```sql --- Searching by status, but also need customer_id and total -create index orders_status_idx on orders (status) include (customer_id, total); - -select status, customer_id, total from orders where status = 'shipped'; -``` - -Reference: [Index-Only Scans](https://www.postgresql.org/docs/current/indexes-index-only-scans.html) diff --git a/.agents/skills/supabase-postgres-best-practices/references/query-index-types.md b/.agents/skills/supabase-postgres-best-practices/references/query-index-types.md deleted file mode 100644 index 93b32590..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/query-index-types.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Choose the Right Index Type for Your Data -impact: HIGH -impactDescription: 10-100x improvement with correct index type -tags: indexes, btree, gin, gist, brin, hash, index-types ---- - -## Choose the Right Index Type for Your Data - -Different index types excel at different query patterns. 
The default B-tree isn't always optimal. - -**Incorrect (B-tree for JSONB containment):** - -```sql --- B-tree cannot optimize containment operators -create index products_attrs_idx on products (attributes); -select * from products where attributes @> '{"color": "red"}'; --- Full table scan - B-tree doesn't support @> operator -``` - -**Correct (GIN for JSONB):** - -```sql --- GIN supports @>, ?, ?&, ?| operators -create index products_attrs_idx on products using gin (attributes); -select * from products where attributes @> '{"color": "red"}'; -``` - -Index type guide: - -```sql --- B-tree (default): =, <, >, BETWEEN, IN, IS NULL -create index users_created_idx on users (created_at); - --- GIN: arrays, JSONB, full-text search -create index posts_tags_idx on posts using gin (tags); - --- GiST: geometric data, range types, nearest-neighbor (KNN) queries -create index locations_idx on places using gist (location); - --- BRIN: large time-series tables (10-100x smaller) -create index events_time_idx on events using brin (created_at); - --- Hash: equality-only (slightly faster than B-tree for =) -create index sessions_token_idx on sessions using hash (token); -``` - -Reference: [Index Types](https://www.postgresql.org/docs/current/indexes-types.html) diff --git a/.agents/skills/supabase-postgres-best-practices/references/query-missing-indexes.md b/.agents/skills/supabase-postgres-best-practices/references/query-missing-indexes.md deleted file mode 100644 index e6daace7..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/query-missing-indexes.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Add Indexes on WHERE and JOIN Columns -impact: CRITICAL -impactDescription: 100-1000x faster queries on large tables -tags: indexes, performance, sequential-scan, query-optimization ---- - -## Add Indexes on WHERE and JOIN Columns - -Queries filtering or joining on unindexed columns cause full table scans, which become exponentially slower as tables grow. 
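-
-One quick way to spot likely candidates is to check `pg_stat_user_tables` for tables with heavy sequential-scan activity (a rough heuristic, not a definitive diagnosis):
-
-```sql
--- Tables read mostly by sequential scans are often missing an index
-select relname, seq_scan, seq_tup_read, idx_scan
-from pg_stat_user_tables
-order by seq_tup_read desc
-limit 10;
-```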
- -**Incorrect (sequential scan on large table):** - -```sql --- No index on customer_id causes full table scan -select * from orders where customer_id = 123; - --- EXPLAIN shows: Seq Scan on orders (cost=0.00..25000.00 rows=100 width=85) -``` - -**Correct (index scan):** - -```sql --- Create index on frequently filtered column -create index orders_customer_id_idx on orders (customer_id); - -select * from orders where customer_id = 123; - --- EXPLAIN shows: Index Scan using orders_customer_id_idx (cost=0.42..8.44 rows=100 width=85) -``` - -For JOIN columns, always index the foreign key side: - -```sql --- Index the referencing column -create index orders_customer_id_idx on orders (customer_id); - -select c.name, o.total -from customers c -join orders o on o.customer_id = c.id; -``` - -Reference: [Query Optimization](https://supabase.com/docs/guides/database/query-optimization) diff --git a/.agents/skills/supabase-postgres-best-practices/references/query-partial-indexes.md b/.agents/skills/supabase-postgres-best-practices/references/query-partial-indexes.md deleted file mode 100644 index 3e61a341..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/query-partial-indexes.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Use Partial Indexes for Filtered Queries -impact: HIGH -impactDescription: 5-20x smaller indexes, faster writes and queries -tags: indexes, partial-index, query-optimization, storage ---- - -## Use Partial Indexes for Filtered Queries - -Partial indexes only include rows matching a WHERE condition, making them smaller and faster when queries consistently filter on the same condition. - -**Incorrect (full index includes irrelevant rows):** - -```sql --- Index includes all rows, even soft-deleted ones -create index users_email_idx on users (email); - --- Query always filters active users -select * from users where email = 'user@example.com' and deleted_at is null; -``` - -**Correct (partial index matches query filter):** - -```sql --- Index only includes active users -create index users_active_email_idx on users (email) -where deleted_at is null; - --- Query uses the smaller, faster index -select * from users where email = 'user@example.com' and deleted_at is null; -``` - -Common use cases for partial indexes: - -```sql --- Only pending orders (status rarely changes once completed) -create index orders_pending_idx on orders (created_at) -where status = 'pending'; - --- Only non-null values -create index products_sku_idx on products (sku) -where sku is not null; -``` - -Reference: [Partial Indexes](https://www.postgresql.org/docs/current/indexes-partial.html) diff --git a/.agents/skills/supabase-postgres-best-practices/references/schema-constraints.md b/.agents/skills/supabase-postgres-best-practices/references/schema-constraints.md deleted file mode 100644 index 1d2ef8f9..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/schema-constraints.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Add Constraints Safely in Migrations -impact: HIGH -impactDescription: Prevents migration failures and enables idempotent schema changes -tags: constraints, migrations, schema, alter-table ---- - -## Add Constraints Safely in Migrations - -PostgreSQL does not support `ADD CONSTRAINT IF NOT EXISTS`. Migrations using this syntax will fail. 
- -**Incorrect (causes syntax error):** - -```sql --- ERROR: syntax error at or near "not" (SQLSTATE 42601) -alter table public.profiles -add constraint if not exists profiles_birthchart_id_unique unique (birthchart_id); -``` - -**Correct (idempotent constraint creation):** - -```sql --- Use DO block to check before adding -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'profiles_birthchart_id_unique' - and conrelid = 'public.profiles'::regclass - ) then - alter table public.profiles - add constraint profiles_birthchart_id_unique unique (birthchart_id); - end if; -end $$; -``` - -For all constraint types: - -```sql --- Check constraints -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'check_age_positive' - ) then - alter table users add constraint check_age_positive check (age > 0); - end if; -end $$; - --- Foreign keys -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'profiles_birthchart_id_fkey' - ) then - alter table profiles - add constraint profiles_birthchart_id_fkey - foreign key (birthchart_id) references birthcharts(id); - end if; -end $$; -``` - -Check if constraint exists: - -```sql --- Query to check constraint existence -select conname, contype, pg_get_constraintdef(oid) -from pg_constraint -where conrelid = 'public.profiles'::regclass; - --- contype values: --- 'p' = PRIMARY KEY --- 'f' = FOREIGN KEY --- 'u' = UNIQUE --- 'c' = CHECK -``` - -Reference: [Constraints](https://www.postgresql.org/docs/current/ddl-constraints.html) diff --git a/.agents/skills/supabase-postgres-best-practices/references/schema-data-types.md b/.agents/skills/supabase-postgres-best-practices/references/schema-data-types.md deleted file mode 100644 index f253a581..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/schema-data-types.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Choose Appropriate Data Types -impact: HIGH -impactDescription: 50% storage reduction, faster comparisons -tags: data-types, schema, storage, performance ---- - -## Choose Appropriate Data Types - -Using the right data types reduces storage, improves query performance, and prevents bugs. 
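-
-To audit an existing schema for the types called out below, a query against `information_schema` is a reasonable starting point (adjust the schema name as needed):
-
-```sql
--- Columns using types that are often worth revisiting
-select table_name, column_name, data_type
-from information_schema.columns
-where table_schema = 'public'
-  and data_type in ('integer', 'character varying', 'timestamp without time zone');
-```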
- -**Incorrect (wrong data types):** - -```sql -create table users ( - id int, -- Will overflow at 2.1 billion - email varchar(255), -- Unnecessary length limit - created_at timestamp, -- Missing timezone info - is_active varchar(5), -- String for boolean - price varchar(20) -- String for numeric -); -``` - -**Correct (appropriate data types):** - -```sql -create table users ( - id bigint generated always as identity primary key, -- 9 quintillion max - email text, -- No artificial limit, same performance as varchar - created_at timestamptz, -- Always store timezone-aware timestamps - is_active boolean default true, -- 1 byte vs variable string length - price numeric(10,2) -- Exact decimal arithmetic -); -``` - -Key guidelines: - -```sql --- IDs: use bigint, not int (future-proofing) --- Strings: use text, not varchar(n) unless constraint needed --- Time: use timestamptz, not timestamp --- Money: use numeric, not float (precision matters) --- Enums: use text with check constraint or create enum type -``` - -Reference: [Data Types](https://www.postgresql.org/docs/current/datatype.html) diff --git a/.agents/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md b/.agents/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md deleted file mode 100644 index 6c3d6ff6..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Index Foreign Key Columns -impact: HIGH -impactDescription: 10-100x faster JOINs and CASCADE operations -tags: foreign-key, indexes, joins, schema ---- - -## Index Foreign Key Columns - -Postgres does not automatically index foreign key columns. Missing indexes cause slow JOINs and CASCADE operations. - -**Incorrect (unindexed foreign key):** - -```sql -create table orders ( - id bigint generated always as identity primary key, - customer_id bigint references customers(id) on delete cascade, - total numeric(10,2) -); - --- No index on customer_id! 
--- JOINs and ON DELETE CASCADE both require full table scan -select * from orders where customer_id = 123; -- Seq Scan -delete from customers where id = 123; -- Locks table, scans all orders -``` - -**Correct (indexed foreign key):** - -```sql -create table orders ( - id bigint generated always as identity primary key, - customer_id bigint references customers(id) on delete cascade, - total numeric(10,2) -); - --- Always index the FK column -create index orders_customer_id_idx on orders (customer_id); - --- Now JOINs and cascades are fast -select * from orders where customer_id = 123; -- Index Scan -delete from customers where id = 123; -- Uses index, fast cascade -``` - -Find missing FK indexes: - -```sql -select - conrelid::regclass as table_name, - a.attname as fk_column -from pg_constraint c -join pg_attribute a on a.attrelid = c.conrelid and a.attnum = any(c.conkey) -where c.contype = 'f' - and not exists ( - select 1 from pg_index i - where i.indrelid = c.conrelid and a.attnum = any(i.indkey) - ); -``` - -Reference: [Foreign Keys](https://www.postgresql.org/docs/current/ddl-constraints.html#DDL-CONSTRAINTS-FK) diff --git a/.agents/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md b/.agents/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md deleted file mode 100644 index f0072940..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Use Lowercase Identifiers for Compatibility -impact: MEDIUM -impactDescription: Avoid case-sensitivity bugs with tools, ORMs, and AI assistants -tags: naming, identifiers, case-sensitivity, schema, conventions ---- - -## Use Lowercase Identifiers for Compatibility - -PostgreSQL folds unquoted identifiers to lowercase. Quoted mixed-case identifiers require quotes forever and cause issues with tools, ORMs, and AI assistants that may not recognize them. 
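-
-To audit an existing database for identifiers that would need quoting, a catalog query along these lines works:
-
-```sql
--- Tables and columns whose names are not already lowercase
-select c.relname as table_name, a.attname as column_name
-from pg_class c
-join pg_namespace n on n.oid = c.relnamespace
-join pg_attribute a on a.attrelid = c.oid and a.attnum > 0 and not a.attisdropped
-where n.nspname = 'public'
-  and c.relkind = 'r'
-  and (c.relname <> lower(c.relname) or a.attname <> lower(a.attname));
-```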
- -**Incorrect (mixed-case identifiers):** - -```sql --- Quoted identifiers preserve case but require quotes everywhere -CREATE TABLE "Users" ( - "userId" bigint PRIMARY KEY, - "firstName" text, - "lastName" text -); - --- Must always quote or queries fail -SELECT "firstName" FROM "Users" WHERE "userId" = 1; - --- This fails - Users becomes users without quotes -SELECT firstName FROM Users; --- ERROR: relation "users" does not exist -``` - -**Correct (lowercase snake_case):** - -```sql --- Unquoted lowercase identifiers are portable and tool-friendly -CREATE TABLE users ( - user_id bigint PRIMARY KEY, - first_name text, - last_name text -); - --- Works without quotes, recognized by all tools -SELECT first_name FROM users WHERE user_id = 1; -``` - -Common sources of mixed-case identifiers: - -```sql --- ORMs often generate quoted camelCase - configure them to use snake_case --- Migrations from other databases may preserve original casing --- Some GUI tools quote identifiers by default - disable this - --- If stuck with mixed-case, create views as a compatibility layer -CREATE VIEW users AS SELECT "userId" AS user_id, "firstName" AS first_name FROM "Users"; -``` - -Reference: [Identifiers and Key Words](https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS) diff --git a/.agents/skills/supabase-postgres-best-practices/references/schema-partitioning.md b/.agents/skills/supabase-postgres-best-practices/references/schema-partitioning.md deleted file mode 100644 index 13137a03..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/schema-partitioning.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Partition Large Tables for Better Performance -impact: MEDIUM-HIGH -impactDescription: 5-20x faster queries and maintenance on large tables -tags: partitioning, large-tables, time-series, performance ---- - -## Partition Large Tables for Better Performance - -Partitioning splits a large table into smaller pieces, improving query performance and maintenance operations. 
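-
-Before partitioning, confirm the table is actually large enough to matter; a quick size check:
-
-```sql
--- Largest tables by total size (table + indexes + TOAST)
-select relname, pg_size_pretty(pg_total_relation_size(relid)) as total_size
-from pg_stat_user_tables
-order by pg_total_relation_size(relid) desc
-limit 10;
-```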
- -**Incorrect (single large table):** - -```sql -create table events ( - id bigint generated always as identity, - created_at timestamptz, - data jsonb -); - --- 500M rows, queries scan everything -select * from events where created_at > '2024-01-01'; -- Slow -vacuum events; -- Takes hours, locks table -``` - -**Correct (partitioned by time range):** - -```sql -create table events ( - id bigint generated always as identity, - created_at timestamptz not null, - data jsonb -) partition by range (created_at); - --- Create partitions for each month -create table events_2024_01 partition of events - for values from ('2024-01-01') to ('2024-02-01'); - -create table events_2024_02 partition of events - for values from ('2024-02-01') to ('2024-03-01'); - --- Queries only scan relevant partitions -select * from events where created_at > '2024-01-15'; -- Only scans events_2024_01+ - --- Drop old data instantly -drop table events_2023_01; -- Instant vs DELETE taking hours -``` - -When to partition: - -- Tables > 100M rows -- Time-series data with date-based queries -- Need to efficiently drop old data - -Reference: [Table Partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) diff --git a/.agents/skills/supabase-postgres-best-practices/references/schema-primary-keys.md b/.agents/skills/supabase-postgres-best-practices/references/schema-primary-keys.md deleted file mode 100644 index fb0fbb16..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/schema-primary-keys.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Select Optimal Primary Key Strategy -impact: HIGH -impactDescription: Better index locality, reduced fragmentation -tags: primary-key, identity, uuid, serial, schema ---- - -## Select Optimal Primary Key Strategy - -Primary key choice affects insert performance, index size, and replication -efficiency. 
- -**Incorrect (problematic PK choices):** - -```sql --- identity is the SQL-standard approach -create table users ( - id serial primary key -- Works, but IDENTITY is recommended -); - --- Random UUIDs (v4) cause index fragmentation -create table orders ( - id uuid default gen_random_uuid() primary key -- UUIDv4 = random = scattered inserts -); -``` - -**Correct (optimal PK strategies):** - -```sql --- Use IDENTITY for sequential IDs (SQL-standard, best for most cases) -create table users ( - id bigint generated always as identity primary key -); - --- For distributed systems needing UUIDs, use UUIDv7 (time-ordered) --- Requires pg_uuidv7 extension: create extension pg_uuidv7; -create table orders ( - id uuid default uuid_generate_v7() primary key -- Time-ordered, no fragmentation -); - --- Alternative: time-prefixed IDs for sortable, distributed IDs (no extension needed) -create table events ( - id text default concat( - to_char(now() at time zone 'utc', 'YYYYMMDDHH24MISSMS'), - gen_random_uuid()::text - ) primary key -); -``` - -Guidelines: - -- Single database: `bigint identity` (sequential, 8 bytes, SQL-standard) -- Distributed/exposed IDs: UUIDv7 (requires pg_uuidv7) or ULID (time-ordered, no - fragmentation) -- `serial` works but `identity` is SQL-standard and preferred for new - applications -- Avoid random UUIDs (v4) as primary keys on large tables (causes index - fragmentation) - -Reference: -[Identity Columns](https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-PARMS-GENERATED-IDENTITY) diff --git a/.agents/skills/supabase-postgres-best-practices/references/security-privileges.md b/.agents/skills/supabase-postgres-best-practices/references/security-privileges.md deleted file mode 100644 index 448ec345..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/security-privileges.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Apply Principle of Least Privilege -impact: MEDIUM -impactDescription: Reduced attack surface, better audit trail -tags: privileges, security, roles, permissions ---- - -## Apply Principle of Least Privilege - -Grant only the minimum permissions required. Never use superuser for application queries. 
- -**Incorrect (overly broad permissions):** - -```sql --- Application uses superuser connection --- Or grants ALL to application role -grant all privileges on all tables in schema public to app_user; -grant all privileges on all sequences in schema public to app_user; - --- Any SQL injection becomes catastrophic --- drop table users; cascades to everything -``` - -**Correct (minimal, specific grants):** - -```sql --- Create role with no default privileges -create role app_readonly nologin; - --- Grant only SELECT on specific tables -grant usage on schema public to app_readonly; -grant select on public.products, public.categories to app_readonly; - --- Create role for writes with limited scope -create role app_writer nologin; -grant usage on schema public to app_writer; -grant select, insert, update on public.orders to app_writer; -grant usage on sequence orders_id_seq to app_writer; --- No DELETE permission - --- Login role inherits from these -create role app_user login password 'xxx'; -grant app_writer to app_user; -``` - -Revoke public defaults: - -```sql --- Revoke default public access -revoke all on schema public from public; -revoke all on all tables in schema public from public; -``` - -Reference: [Roles and Privileges](https://supabase.com/blog/postgres-roles-and-privileges) diff --git a/.agents/skills/supabase-postgres-best-practices/references/security-rls-basics.md b/.agents/skills/supabase-postgres-best-practices/references/security-rls-basics.md deleted file mode 100644 index c61e1a85..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/security-rls-basics.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Enable Row Level Security for Multi-Tenant Data -impact: CRITICAL -impactDescription: Database-enforced tenant isolation, prevent data leaks -tags: rls, row-level-security, multi-tenant, security ---- - -## Enable Row Level Security for Multi-Tenant Data - -Row Level Security (RLS) enforces data access at the database level, ensuring users only see their own data. - -**Incorrect (application-level filtering only):** - -```sql --- Relying only on application to filter -select * from orders where user_id = $current_user_id; - --- Bug or bypass means all data is exposed! 
-select * from orders; -- Returns ALL orders -``` - -**Correct (database-enforced RLS):** - -```sql --- Enable RLS on the table -alter table orders enable row level security; - --- Create policy for users to see only their orders -create policy orders_user_policy on orders - for all - using (user_id = current_setting('app.current_user_id')::bigint); - --- Force RLS even for table owners -alter table orders force row level security; - --- Set user context and query -set app.current_user_id = '123'; -select * from orders; -- Only returns orders for user 123 -``` - -Policy for authenticated role: - -```sql -create policy orders_user_policy on orders - for all - to authenticated - using (user_id = auth.uid()); -``` - -Reference: [Row Level Security](https://supabase.com/docs/guides/database/postgres/row-level-security) diff --git a/.agents/skills/supabase-postgres-best-practices/references/security-rls-performance.md b/.agents/skills/supabase-postgres-best-practices/references/security-rls-performance.md deleted file mode 100644 index b32d92f7..00000000 --- a/.agents/skills/supabase-postgres-best-practices/references/security-rls-performance.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Optimize RLS Policies for Performance -impact: HIGH -impactDescription: 5-10x faster RLS queries with proper patterns -tags: rls, performance, security, optimization ---- - -## Optimize RLS Policies for Performance - -Poorly written RLS policies can cause severe performance issues. Use subqueries and indexes strategically. - -**Incorrect (function called for every row):** - -```sql -create policy orders_policy on orders - using (auth.uid() = user_id); -- auth.uid() called per row! - --- With 1M rows, auth.uid() is called 1M times -``` - -**Correct (wrap functions in SELECT):** - -```sql -create policy orders_policy on orders - using ((select auth.uid()) = user_id); -- Called once, cached - --- 100x+ faster on large tables -``` - -Use security definer functions for complex checks: - -```sql --- Create helper function (runs as definer, bypasses RLS) -create or replace function is_team_member(team_id bigint) -returns boolean -language sql -security definer -set search_path = '' -as $$ - select exists ( - select 1 from public.team_members - where team_id = $1 and user_id = (select auth.uid()) - ); -$$; - --- Use in policy (indexed lookup, not per-row check) -create policy team_orders_policy on orders - using ((select is_team_member(team_id))); -``` - -Always add indexes on columns used in RLS policies: - -```sql -create index orders_user_id_idx on orders (user_id); -``` - -Reference: [RLS Performance](https://supabase.com/docs/guides/database/postgres/row-level-security#rls-performance-recommendations) diff --git a/.augment/skills/supabase-postgres-best-practices/AGENTS.md b/.augment/skills/supabase-postgres-best-practices/AGENTS.md deleted file mode 100644 index a7baf445..00000000 --- a/.augment/skills/supabase-postgres-best-practices/AGENTS.md +++ /dev/null @@ -1,68 +0,0 @@ -# Supabase Postgres Best Practices - -## Structure - -``` -supabase-postgres-best-practices/ - SKILL.md # Main skill file - read this first - AGENTS.md # This navigation guide - CLAUDE.md # Symlink to AGENTS.md - references/ # Detailed reference files -``` - -## Usage - -1. Read `SKILL.md` for the main skill instructions -2. Browse `references/` for detailed documentation on specific topics -3. Reference files are loaded on-demand - read only what you need - -Comprehensive performance optimization guide for Postgres, maintained by Supabase. 
Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. - -## When to Apply - -Reference these guidelines when: -- Writing SQL queries or designing schemas -- Implementing indexes or query optimization -- Reviewing database performance issues -- Configuring connection pooling or scaling -- Optimizing for Postgres-specific features -- Working with Row-Level Security (RLS) - -## Rule Categories by Priority - -| Priority | Category | Impact | Prefix | -|----------|----------|--------|--------| -| 1 | Query Performance | CRITICAL | `query-` | -| 2 | Connection Management | CRITICAL | `conn-` | -| 3 | Security & RLS | CRITICAL | `security-` | -| 4 | Schema Design | HIGH | `schema-` | -| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | -| 6 | Data Access Patterns | MEDIUM | `data-` | -| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | -| 8 | Advanced Features | LOW | `advanced-` | - -## How to Use - -Read individual rule files for detailed explanations and SQL examples: - -``` -references/query-missing-indexes.md -references/schema-partial-indexes.md -references/_sections.md -``` - -Each rule file contains: -- Brief explanation of why it matters -- Incorrect SQL example with explanation -- Correct SQL example with explanation -- Optional EXPLAIN output or metrics -- Additional context and references -- Supabase-specific notes (when applicable) - -## References - -- https://www.postgresql.org/docs/current/ -- https://supabase.com/docs -- https://wiki.postgresql.org/wiki/Performance_Optimization -- https://supabase.com/docs/guides/database/overview -- https://supabase.com/docs/guides/auth/row-level-security diff --git a/.augment/skills/supabase-postgres-best-practices/CLAUDE.md b/.augment/skills/supabase-postgres-best-practices/CLAUDE.md deleted file mode 100644 index 47dc3e3d..00000000 --- a/.augment/skills/supabase-postgres-best-practices/CLAUDE.md +++ /dev/null @@ -1 +0,0 @@ -AGENTS.md \ No newline at end of file diff --git a/.augment/skills/supabase-postgres-best-practices/README.md b/.augment/skills/supabase-postgres-best-practices/README.md deleted file mode 100644 index f1a374e1..00000000 --- a/.augment/skills/supabase-postgres-best-practices/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# Supabase Postgres Best Practices - Contributor Guide - -This skill contains Postgres performance optimization references optimized for -AI agents and LLMs. It follows the [Agent Skills Open Standard](https://agentskills.io/). - -## Quick Start - -```bash -# From repository root -npm install - -# Validate existing references -npm run validate - -# Build AGENTS.md -npm run build -``` - -## Creating a New Reference - -1. **Choose a section prefix** based on the category: - - `query-` Query Performance (CRITICAL) - - `conn-` Connection Management (CRITICAL) - - `security-` Security & RLS (CRITICAL) - - `schema-` Schema Design (HIGH) - - `lock-` Concurrency & Locking (MEDIUM-HIGH) - - `data-` Data Access Patterns (MEDIUM) - - `monitor-` Monitoring & Diagnostics (LOW-MEDIUM) - - `advanced-` Advanced Features (LOW) - -2. **Copy the template**: - ```bash - cp references/_template.md references/query-your-reference-name.md - ``` - -3. **Fill in the content** following the template structure - -4. **Validate and build**: - ```bash - npm run validate - npm run build - ``` - -5. 
**Review** the generated `AGENTS.md` - -## Skill Structure - -``` -skills/supabase-postgres-best-practices/ -β”œβ”€β”€ SKILL.md # Agent-facing skill manifest (Agent Skills spec) -β”œβ”€β”€ AGENTS.md # [GENERATED] Compiled references document -β”œβ”€β”€ README.md # This file -└── references/ - β”œβ”€β”€ _template.md # Reference template - β”œβ”€β”€ _sections.md # Section definitions - β”œβ”€β”€ _contributing.md # Writing guidelines - └── *.md # Individual references - -packages/skills-build/ -β”œβ”€β”€ src/ # Generic build system source -└── package.json # NPM scripts -``` - -## Reference File Structure - -See `references/_template.md` for the complete template. Key elements: - -````markdown ---- -title: Clear, Action-Oriented Title -impact: CRITICAL|HIGH|MEDIUM-HIGH|MEDIUM|LOW-MEDIUM|LOW -impactDescription: Quantified benefit (e.g., "10-100x faster") -tags: relevant, keywords ---- - -## [Title] - -[1-2 sentence explanation] - -**Incorrect (description):** - -```sql --- Comment explaining what's wrong -[Bad SQL example] -``` -```` - -**Correct (description):** - -```sql --- Comment explaining why this is better -[Good SQL example] -``` - -``` -## Writing Guidelines - -See `references/_contributing.md` for detailed guidelines. Key principles: - -1. **Show concrete transformations** - "Change X to Y", not abstract advice -2. **Error-first structure** - Show the problem before the solution -3. **Quantify impact** - Include specific metrics (10x faster, 50% smaller) -4. **Self-contained examples** - Complete, runnable SQL -5. **Semantic naming** - Use meaningful names (users, email), not (table1, col1) - -## Impact Levels - -| Level | Improvement | Examples | -|-------|-------------|----------| -| CRITICAL | 10-100x | Missing indexes, connection exhaustion | -| HIGH | 5-20x | Wrong index types, poor partitioning | -| MEDIUM-HIGH | 2-5x | N+1 queries, RLS optimization | -| MEDIUM | 1.5-3x | Redundant indexes, stale statistics | -| LOW-MEDIUM | 1.2-2x | VACUUM tuning, config tweaks | -| LOW | Incremental | Advanced patterns, edge cases | -``` diff --git a/.augment/skills/supabase-postgres-best-practices/SKILL.md b/.augment/skills/supabase-postgres-best-practices/SKILL.md deleted file mode 100644 index f80be156..00000000 --- a/.augment/skills/supabase-postgres-best-practices/SKILL.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -name: supabase-postgres-best-practices -description: Postgres performance optimization and best practices from Supabase. Use this skill when writing, reviewing, or optimizing Postgres queries, schema designs, or database configurations. -license: MIT -metadata: - author: supabase - version: "1.1.0" - organization: Supabase - date: January 2026 - abstract: Comprehensive Postgres performance optimization guide for developers using Supabase and Postgres. Contains performance rules across 8 categories, prioritized by impact from critical (query performance, connection management) to incremental (advanced features). Each rule includes detailed explanations, incorrect vs. correct SQL examples, query plan analysis, and specific performance metrics to guide automated optimization and code generation. ---- - -# Supabase Postgres Best Practices - -Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. 
- -## When to Apply - -Reference these guidelines when: -- Writing SQL queries or designing schemas -- Implementing indexes or query optimization -- Reviewing database performance issues -- Configuring connection pooling or scaling -- Optimizing for Postgres-specific features -- Working with Row-Level Security (RLS) - -## Rule Categories by Priority - -| Priority | Category | Impact | Prefix | -|----------|----------|--------|--------| -| 1 | Query Performance | CRITICAL | `query-` | -| 2 | Connection Management | CRITICAL | `conn-` | -| 3 | Security & RLS | CRITICAL | `security-` | -| 4 | Schema Design | HIGH | `schema-` | -| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | -| 6 | Data Access Patterns | MEDIUM | `data-` | -| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | -| 8 | Advanced Features | LOW | `advanced-` | - -## How to Use - -Read individual rule files for detailed explanations and SQL examples: - -``` -references/query-missing-indexes.md -references/schema-partial-indexes.md -references/_sections.md -``` - -Each rule file contains: -- Brief explanation of why it matters -- Incorrect SQL example with explanation -- Correct SQL example with explanation -- Optional EXPLAIN output or metrics -- Additional context and references -- Supabase-specific notes (when applicable) - -## References - -- https://www.postgresql.org/docs/current/ -- https://supabase.com/docs -- https://wiki.postgresql.org/wiki/Performance_Optimization -- https://supabase.com/docs/guides/database/overview -- https://supabase.com/docs/guides/auth/row-level-security diff --git a/.augment/skills/supabase-postgres-best-practices/references/_contributing.md b/.augment/skills/supabase-postgres-best-practices/references/_contributing.md deleted file mode 100644 index 10de8ecb..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/_contributing.md +++ /dev/null @@ -1,171 +0,0 @@ -# Writing Guidelines for Postgres References - -This document provides guidelines for creating effective Postgres best -practice references that work well with AI agents and LLMs. - -## Key Principles - -### 1. Concrete Transformation Patterns - -Show exact SQL rewrites. Avoid philosophical advice. - -**Good:** "Use `WHERE id = ANY(ARRAY[...])` instead of -`WHERE id IN (SELECT ...)`" **Bad:** "Design good schemas" - -### 2. Error-First Structure - -Always show the problematic pattern first, then the solution. This trains agents -to recognize anti-patterns. - -```markdown -**Incorrect (sequential queries):** [bad example] - -**Correct (batched query):** [good example] -``` - -### 3. Quantified Impact - -Include specific metrics. Helps agents prioritize fixes. - -**Good:** "10x faster queries", "50% smaller index", "Eliminates N+1" -**Bad:** "Faster", "Better", "More efficient" - -### 4. Self-Contained Examples - -Examples should be complete and runnable (or close to it). Include `CREATE TABLE` -if context is needed. - -```sql --- Include table definition when needed for clarity -CREATE TABLE users ( - id bigint PRIMARY KEY, - email text NOT NULL, - deleted_at timestamptz -); - --- Now show the index -CREATE INDEX users_active_email_idx ON users(email) WHERE deleted_at IS NULL; -``` - -### 5. Semantic Naming - -Use meaningful table/column names. Names carry intent for LLMs. 
- -**Good:** `users`, `email`, `created_at`, `is_active` -**Bad:** `table1`, `col1`, `field`, `flag` - ---- - -## Code Example Standards - -### SQL Formatting - -```sql --- Use lowercase keywords, clear formatting -CREATE INDEX CONCURRENTLY users_email_idx - ON users(email) - WHERE deleted_at IS NULL; - --- Not cramped or ALL CAPS -CREATE INDEX CONCURRENTLY USERS_EMAIL_IDX ON USERS(EMAIL) WHERE DELETED_AT IS NULL; -``` - -### Comments - -- Explain _why_, not _what_ -- Highlight performance implications -- Point out common pitfalls - -### Language Tags - -- `sql` - Standard SQL queries -- `plpgsql` - Stored procedures/functions -- `typescript` - Application code (when needed) -- `python` - Application code (when needed) - ---- - -## When to Include Application Code - -**Default: SQL Only** - -Most references should focus on pure SQL patterns. This keeps examples portable. - -**Include Application Code When:** - -- Connection pooling configuration -- Transaction management in application context -- ORM anti-patterns (N+1 in Prisma/TypeORM) -- Prepared statement usage - -**Format for Mixed Examples:** - -````markdown -**Incorrect (N+1 in application):** - -```typescript -for (const user of users) { - const posts = await db.query("SELECT * FROM posts WHERE user_id = $1", [ - user.id, - ]); -} -``` -```` - -**Correct (batch query):** - -```typescript -const posts = await db.query("SELECT * FROM posts WHERE user_id = ANY($1)", [ - userIds, -]); -``` - ---- - -## Impact Level Guidelines - -| Level | Improvement | Use When | -|-------|-------------|----------| -| **CRITICAL** | 10-100x | Missing indexes, connection exhaustion, sequential scans on large tables | -| **HIGH** | 5-20x | Wrong index types, poor partitioning, missing covering indexes | -| **MEDIUM-HIGH** | 2-5x | N+1 queries, inefficient pagination, RLS optimization | -| **MEDIUM** | 1.5-3x | Redundant indexes, query plan instability | -| **LOW-MEDIUM** | 1.2-2x | VACUUM tuning, configuration tweaks | -| **LOW** | Incremental | Advanced patterns, edge cases | - ---- - -## Reference Standards - -**Primary Sources:** - -- Official Postgres documentation -- Supabase documentation -- Postgres wiki -- Established blogs (2ndQuadrant, Crunchy Data) - -**Format:** - -```markdown -Reference: -[Postgres Indexes](https://www.postgresql.org/docs/current/indexes.html) -``` - ---- - -## Review Checklist - -Before submitting a reference: - -- [ ] Title is clear and action-oriented -- [ ] Impact level matches the performance gain -- [ ] impactDescription includes quantification -- [ ] Explanation is concise (1-2 sentences) -- [ ] Has at least 1 **Incorrect** SQL example -- [ ] Has at least 1 **Correct** SQL example -- [ ] SQL uses semantic naming -- [ ] Comments explain _why_, not _what_ -- [ ] Trade-offs mentioned if applicable -- [ ] Reference links included -- [ ] `npm run validate` passes -- [ ] `npm run build` generates correct output diff --git a/.augment/skills/supabase-postgres-best-practices/references/_sections.md b/.augment/skills/supabase-postgres-best-practices/references/_sections.md deleted file mode 100644 index 8ba57c23..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/_sections.md +++ /dev/null @@ -1,39 +0,0 @@ -# Section Definitions - -This file defines the rule categories for Postgres best practices. Rules are automatically assigned to sections based on their filename prefix. - -Take the examples below as pure demonstrative. Replace each section with the actual rule categories for Postgres best practices. 
- ---- - -## 1. Query Performance (query) -**Impact:** CRITICAL -**Description:** Slow queries, missing indexes, inefficient query plans. The most common source of Postgres performance issues. - -## 2. Connection Management (conn) -**Impact:** CRITICAL -**Description:** Connection pooling, limits, and serverless strategies. Critical for applications with high concurrency or serverless deployments. - -## 3. Security & RLS (security) -**Impact:** CRITICAL -**Description:** Row-Level Security policies, privilege management, and authentication patterns. - -## 4. Schema Design (schema) -**Impact:** HIGH -**Description:** Table design, index strategies, partitioning, and data type selection. Foundation for long-term performance. - -## 5. Concurrency & Locking (lock) -**Impact:** MEDIUM-HIGH -**Description:** Transaction management, isolation levels, deadlock prevention, and lock contention patterns. - -## 6. Data Access Patterns (data) -**Impact:** MEDIUM -**Description:** N+1 query elimination, batch operations, cursor-based pagination, and efficient data fetching. - -## 7. Monitoring & Diagnostics (monitor) -**Impact:** LOW-MEDIUM -**Description:** Using pg_stat_statements, EXPLAIN ANALYZE, metrics collection, and performance diagnostics. - -## 8. Advanced Features (advanced) -**Impact:** LOW -**Description:** Full-text search, JSONB optimization, PostGIS, extensions, and advanced Postgres features. diff --git a/.augment/skills/supabase-postgres-best-practices/references/_template.md b/.augment/skills/supabase-postgres-best-practices/references/_template.md deleted file mode 100644 index 91ace90e..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/_template.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Clear, Action-Oriented Title (e.g., "Use Partial Indexes for Filtered Queries") -impact: MEDIUM -impactDescription: 5-20x query speedup for filtered queries -tags: indexes, query-optimization, performance ---- - -## [Rule Title] - -[1-2 sentence explanation of the problem and why it matters. Focus on performance impact.] - -**Incorrect (describe the problem):** - -```sql --- Comment explaining what makes this slow/problematic -CREATE INDEX users_email_idx ON users(email); - -SELECT * FROM users WHERE email = 'user@example.com' AND deleted_at IS NULL; --- This scans deleted records unnecessarily -``` - -**Correct (describe the solution):** - -```sql --- Comment explaining why this is better -CREATE INDEX users_active_email_idx ON users(email) WHERE deleted_at IS NULL; - -SELECT * FROM users WHERE email = 'user@example.com' AND deleted_at IS NULL; --- Only indexes active users, 10x smaller index, faster queries -``` - -[Optional: Additional context, edge cases, or trade-offs] - -Reference: [Postgres Docs](https://www.postgresql.org/docs/current/) diff --git a/.augment/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md b/.augment/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md deleted file mode 100644 index 582cbeaa..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Use tsvector for Full-Text Search -impact: MEDIUM -impactDescription: 100x faster than LIKE, with ranking support -tags: full-text-search, tsvector, gin, search ---- - -## Use tsvector for Full-Text Search - -LIKE with wildcards can't use indexes. Full-text search with tsvector is orders of magnitude faster. 
- -**Incorrect (LIKE pattern matching):** - -```sql --- Cannot use index, scans all rows -select * from articles where content like '%postgresql%'; - --- Case-insensitive makes it worse -select * from articles where lower(content) like '%postgresql%'; -``` - -**Correct (full-text search with tsvector):** - -```sql --- Add tsvector column and index -alter table articles add column search_vector tsvector - generated always as (to_tsvector('english', coalesce(title,'') || ' ' || coalesce(content,''))) stored; - -create index articles_search_idx on articles using gin (search_vector); - --- Fast full-text search -select * from articles -where search_vector @@ to_tsquery('english', 'postgresql & performance'); - --- With ranking -select *, ts_rank(search_vector, query) as rank -from articles, to_tsquery('english', 'postgresql') query -where search_vector @@ query -order by rank desc; -``` - -Search multiple terms: - -```sql --- AND: both terms required -to_tsquery('postgresql & performance') - --- OR: either term -to_tsquery('postgresql | mysql') - --- Prefix matching -to_tsquery('post:*') -``` - -Reference: [Full Text Search](https://supabase.com/docs/guides/database/full-text-search) diff --git a/.augment/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md b/.augment/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md deleted file mode 100644 index e3d261ea..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Index JSONB Columns for Efficient Querying -impact: MEDIUM -impactDescription: 10-100x faster JSONB queries with proper indexing -tags: jsonb, gin, indexes, json ---- - -## Index JSONB Columns for Efficient Querying - -JSONB queries without indexes scan the entire table. Use GIN indexes for containment queries. 
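-
-GIN helps containment and existence operators, but not range comparisons on a key's value; for those, a typed expression index is needed instead (a sketch using the `products.attributes` column from the examples below):
-
-```sql
--- Range filter on a JSONB key needs a cast expression index
-create index products_price_idx on products (((attributes->>'price')::numeric));
-select * from products where (attributes->>'price')::numeric < 100;
-```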
- -**Incorrect (no index on JSONB):** - -```sql -create table products ( - id bigint primary key, - attributes jsonb -); - --- Full table scan for every query -select * from products where attributes @> '{"color": "red"}'; -select * from products where attributes->>'brand' = 'Nike'; -``` - -**Correct (GIN index for JSONB):** - -```sql --- GIN index for containment operators (@>, ?, ?&, ?|) -create index products_attrs_gin on products using gin (attributes); - --- Now containment queries use the index -select * from products where attributes @> '{"color": "red"}'; - --- For specific key lookups, use expression index -create index products_brand_idx on products ((attributes->>'brand')); -select * from products where attributes->>'brand' = 'Nike'; -``` - -Choose the right operator class: - -```sql --- jsonb_ops (default): supports all operators, larger index -create index idx1 on products using gin (attributes); - --- jsonb_path_ops: only @> operator, but 2-3x smaller index -create index idx2 on products using gin (attributes jsonb_path_ops); -``` - -Reference: [JSONB Indexes](https://www.postgresql.org/docs/current/datatype-json.html#JSON-INDEXING) diff --git a/.augment/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md b/.augment/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md deleted file mode 100644 index 40b9cc50..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Configure Idle Connection Timeouts -impact: HIGH -impactDescription: Reclaim 30-50% of connection slots from idle clients -tags: connections, timeout, idle, resource-management ---- - -## Configure Idle Connection Timeouts - -Idle connections waste resources. Configure timeouts to automatically reclaim them. - -**Incorrect (connections held indefinitely):** - -```sql --- No timeout configured -show idle_in_transaction_session_timeout; -- 0 (disabled) - --- Connections stay open forever, even when idle -select pid, state, state_change, query -from pg_stat_activity -where state = 'idle in transaction'; --- Shows transactions idle for hours, holding locks -``` - -**Correct (automatic cleanup of idle connections):** - -```sql --- Terminate connections idle in transaction after 30 seconds -alter system set idle_in_transaction_session_timeout = '30s'; - --- Terminate completely idle connections after 10 minutes -alter system set idle_session_timeout = '10min'; - --- Reload configuration -select pg_reload_conf(); -``` - -For pooled connections, configure at the pooler level: - -```ini -# pgbouncer.ini -server_idle_timeout = 60 -client_idle_timeout = 300 -``` - -Reference: [Connection Timeouts](https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT) diff --git a/.augment/skills/supabase-postgres-best-practices/references/conn-limits.md b/.augment/skills/supabase-postgres-best-practices/references/conn-limits.md deleted file mode 100644 index cb3e400c..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/conn-limits.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Set Appropriate Connection Limits -impact: CRITICAL -impactDescription: Prevent database crashes and memory exhaustion -tags: connections, max-connections, limits, stability ---- - -## Set Appropriate Connection Limits - -Too many connections exhaust memory and degrade performance. Set limits based on available resources. 
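-
-To see how close the database already runs to its limit, compare live connections against the configured maximum:
-
-```sql
--- Current connections vs. the configured ceiling
-select
-  (select count(*) from pg_stat_activity) as current_connections,
-  current_setting('max_connections')::int as max_connections;
-```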
- -**Incorrect (unlimited or excessive connections):** - -```sql --- Default max_connections = 100, but often increased blindly -show max_connections; -- 500 (way too high for 4GB RAM) - --- Each connection uses 1-3MB RAM --- 500 connections * 2MB = 1GB just for connections! --- Out of memory errors under load -``` - -**Correct (calculate based on resources):** - -```sql --- Formula: max_connections = (RAM in MB / 5MB per connection) - reserved --- For 4GB RAM: (4096 / 5) - 10 = ~800 theoretical max --- But practically, 100-200 is better for query performance - --- Recommended settings for 4GB RAM -alter system set max_connections = 100; - --- Also set work_mem appropriately --- work_mem * max_connections should not exceed 25% of RAM -alter system set work_mem = '8MB'; -- 8MB * 100 = 800MB max -``` - -Monitor connection usage: - -```sql -select count(*), state from pg_stat_activity group by state; -``` - -Reference: [Database Connections](https://supabase.com/docs/guides/platform/performance#connection-management) diff --git a/.augment/skills/supabase-postgres-best-practices/references/conn-pooling.md b/.augment/skills/supabase-postgres-best-practices/references/conn-pooling.md deleted file mode 100644 index e2ebd581..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/conn-pooling.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Use Connection Pooling for All Applications -impact: CRITICAL -impactDescription: Handle 10-100x more concurrent users -tags: connection-pooling, pgbouncer, performance, scalability ---- - -## Use Connection Pooling for All Applications - -Postgres connections are expensive (1-3MB RAM each). Without pooling, applications exhaust connections under load. - -**Incorrect (new connection per request):** - -```sql --- Each request creates a new connection --- Application code: db.connect() per request --- Result: 500 concurrent users = 500 connections = crashed database - --- Check current connections -select count(*) from pg_stat_activity; -- 487 connections! -``` - -**Correct (connection pooling):** - -```sql --- Use a pooler like PgBouncer between app and database --- Application connects to pooler, pooler reuses a small pool to Postgres - --- Configure pool_size based on: (CPU cores * 2) + spindle_count --- Example for 4 cores: pool_size = 10 - --- Result: 500 concurrent users share 10 actual connections -select count(*) from pg_stat_activity; -- 10 connections -``` - -Pool modes: - -- **Transaction mode**: connection returned after each transaction (best for most apps) -- **Session mode**: connection held for entire session (needed for prepared statements, temp tables) - -Reference: [Connection Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pooler) diff --git a/.augment/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md b/.augment/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md deleted file mode 100644 index 555547d8..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Use Prepared Statements Correctly with Pooling -impact: HIGH -impactDescription: Avoid prepared statement conflicts in pooled environments -tags: prepared-statements, connection-pooling, transaction-mode ---- - -## Use Prepared Statements Correctly with Pooling - -Prepared statements are tied to individual database connections. 
In transaction-mode pooling, connections are shared, causing conflicts. - -**Incorrect (named prepared statements with transaction pooling):** - -```sql --- Named prepared statement -prepare get_user as select * from users where id = $1; - --- In transaction mode pooling, next request may get different connection -execute get_user(123); --- ERROR: prepared statement "get_user" does not exist -``` - -**Correct (use unnamed statements or session mode):** - -```sql --- Option 1: Use unnamed prepared statements (most ORMs do this automatically) --- The query is prepared and executed in a single protocol message - --- Option 2: Deallocate after use in transaction mode -prepare get_user as select * from users where id = $1; -execute get_user(123); -deallocate get_user; - --- Option 3: Use session mode pooling (port 5432 vs 6543) --- Connection is held for entire session, prepared statements persist -``` - -Check your driver settings: - -```sql --- Many drivers use prepared statements by default --- Node.js pg: { prepare: false } to disable --- JDBC: prepareThreshold=0 to disable -``` - -Reference: [Prepared Statements with Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pool-modes) diff --git a/.augment/skills/supabase-postgres-best-practices/references/data-batch-inserts.md b/.augment/skills/supabase-postgres-best-practices/references/data-batch-inserts.md deleted file mode 100644 index 997947cb..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/data-batch-inserts.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Batch INSERT Statements for Bulk Data -impact: MEDIUM -impactDescription: 10-50x faster bulk inserts -tags: batch, insert, bulk, performance, copy ---- - -## Batch INSERT Statements for Bulk Data - -Individual INSERT statements have high overhead. Batch multiple rows in single statements or use COPY. - -**Incorrect (individual inserts):** - -```sql --- Each insert is a separate transaction and round trip -insert into events (user_id, action) values (1, 'click'); -insert into events (user_id, action) values (1, 'view'); -insert into events (user_id, action) values (2, 'click'); --- ... 1000 more individual inserts - --- 1000 inserts = 1000 round trips = slow -``` - -**Correct (batch insert):** - -```sql --- Multiple rows in single statement -insert into events (user_id, action) values - (1, 'click'), - (1, 'view'), - (2, 'click'), - -- ... up to ~1000 rows per batch - (999, 'view'); - --- One round trip for 1000 rows -``` - -For large imports, use COPY: - -```sql --- COPY is fastest for bulk loading -copy events (user_id, action, created_at) -from '/path/to/data.csv' -with (format csv, header true); - --- Or from stdin in application -copy events (user_id, action) from stdin with (format csv); -1,click -1,view -2,click -\. -``` - -Reference: [COPY](https://www.postgresql.org/docs/current/sql-copy.html) diff --git a/.augment/skills/supabase-postgres-best-practices/references/data-n-plus-one.md b/.augment/skills/supabase-postgres-best-practices/references/data-n-plus-one.md deleted file mode 100644 index 2109186f..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/data-n-plus-one.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Eliminate N+1 Queries with Batch Loading -impact: MEDIUM-HIGH -impactDescription: 10-100x fewer database round trips -tags: n-plus-one, batch, performance, queries ---- - -## Eliminate N+1 Queries with Batch Loading - -N+1 queries execute one query per item in a loop. 
Batch them into a single query using arrays or JOINs. - -**Incorrect (N+1 queries):** - -```sql --- First query: get all users -select id from users where active = true; -- Returns 100 IDs - --- Then N queries, one per user -select * from orders where user_id = 1; -select * from orders where user_id = 2; -select * from orders where user_id = 3; --- ... 97 more queries! - --- Total: 101 round trips to database -``` - -**Correct (single batch query):** - -```sql --- Collect IDs and query once with ANY -select * from orders where user_id = any(array[1, 2, 3, ...]); - --- Or use JOIN instead of loop -select u.id, u.name, o.* -from users u -left join orders o on o.user_id = u.id -where u.active = true; - --- Total: 1 round trip -``` - -Application pattern: - -```sql --- Instead of looping in application code: --- for user in users: db.query("SELECT * FROM orders WHERE user_id = $1", user.id) - --- Pass array parameter: -select * from orders where user_id = any($1::bigint[]); --- Application passes: [1, 2, 3, 4, 5, ...] -``` - -Reference: [N+1 Query Problem](https://supabase.com/docs/guides/database/query-optimization) diff --git a/.augment/skills/supabase-postgres-best-practices/references/data-pagination.md b/.augment/skills/supabase-postgres-best-practices/references/data-pagination.md deleted file mode 100644 index 633d8393..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/data-pagination.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Use Cursor-Based Pagination Instead of OFFSET -impact: MEDIUM-HIGH -impactDescription: Consistent O(1) performance regardless of page depth -tags: pagination, cursor, keyset, offset, performance ---- - -## Use Cursor-Based Pagination Instead of OFFSET - -OFFSET-based pagination scans all skipped rows, getting slower on deeper pages. Cursor pagination is O(1). - -**Incorrect (OFFSET pagination):** - -```sql --- Page 1: scans 20 rows -select * from products order by id limit 20 offset 0; - --- Page 100: scans 2000 rows to skip 1980 -select * from products order by id limit 20 offset 1980; - --- Page 10000: scans 200,000 rows! -select * from products order by id limit 20 offset 199980; -``` - -**Correct (cursor/keyset pagination):** - -```sql --- Page 1: get first 20 -select * from products order by id limit 20; --- Application stores last_id = 20 - --- Page 2: start after last ID -select * from products where id > 20 order by id limit 20; --- Uses index, always fast regardless of page depth - --- Page 10000: same speed as page 1 -select * from products where id > 199980 order by id limit 20; -``` - -For multi-column sorting: - -```sql --- Cursor must include all sort columns -select * from products -where (created_at, id) > ('2024-01-15 10:00:00', 12345) -order by created_at, id -limit 20; -``` - -Reference: [Pagination](https://supabase.com/docs/guides/database/pagination) diff --git a/.augment/skills/supabase-postgres-best-practices/references/data-upsert.md b/.augment/skills/supabase-postgres-best-practices/references/data-upsert.md deleted file mode 100644 index bc95e230..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/data-upsert.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Use UPSERT for Insert-or-Update Operations -impact: MEDIUM -impactDescription: Atomic operation, eliminates race conditions -tags: upsert, on-conflict, insert, update ---- - -## Use UPSERT for Insert-or-Update Operations - -Using separate SELECT-then-INSERT/UPDATE creates race conditions. Use INSERT ... ON CONFLICT for atomic upserts. 
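-
-Note that `ON CONFLICT (...)` only works when a unique constraint or unique index exists on the conflict target; a minimal sketch for the `settings` table used below:
-
-```sql
--- The conflict target must be backed by a unique constraint or index
-create unique index if not exists settings_user_key_uidx on settings (user_id, key);
-```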
- -**Incorrect (check-then-insert race condition):** - -```sql --- Race condition: two requests check simultaneously -select * from settings where user_id = 123 and key = 'theme'; --- Both find nothing - --- Both try to insert -insert into settings (user_id, key, value) values (123, 'theme', 'dark'); --- One succeeds, one fails with duplicate key error! -``` - -**Correct (atomic UPSERT):** - -```sql --- Single atomic operation -insert into settings (user_id, key, value) -values (123, 'theme', 'dark') -on conflict (user_id, key) -do update set value = excluded.value, updated_at = now(); - --- Returns the inserted/updated row -insert into settings (user_id, key, value) -values (123, 'theme', 'dark') -on conflict (user_id, key) -do update set value = excluded.value -returning *; -``` - -Insert-or-ignore pattern: - -```sql --- Insert only if not exists (no update) -insert into page_views (page_id, user_id) -values (1, 123) -on conflict (page_id, user_id) do nothing; -``` - -Reference: [INSERT ON CONFLICT](https://www.postgresql.org/docs/current/sql-insert.html#SQL-ON-CONFLICT) diff --git a/.augment/skills/supabase-postgres-best-practices/references/lock-advisory.md b/.augment/skills/supabase-postgres-best-practices/references/lock-advisory.md deleted file mode 100644 index 572eaf0d..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/lock-advisory.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Use Advisory Locks for Application-Level Locking -impact: MEDIUM -impactDescription: Efficient coordination without row-level lock overhead -tags: advisory-locks, coordination, application-locks ---- - -## Use Advisory Locks for Application-Level Locking - -Advisory locks provide application-level coordination without requiring database rows to lock. - -**Incorrect (creating rows just for locking):** - -```sql --- Creating dummy rows to lock on -create table resource_locks ( - resource_name text primary key -); - -insert into resource_locks values ('report_generator'); - --- Lock by selecting the row -select * from resource_locks where resource_name = 'report_generator' for update; -``` - -**Correct (advisory locks):** - -```sql --- Session-level advisory lock (released on disconnect or unlock) -select pg_advisory_lock(hashtext('report_generator')); --- ... do exclusive work ... -select pg_advisory_unlock(hashtext('report_generator')); - --- Transaction-level lock (released on commit/rollback) -begin; -select pg_advisory_xact_lock(hashtext('daily_report')); --- ... do work ... 
-commit; -- Lock automatically released -``` - -Try-lock for non-blocking operations: - -```sql --- Returns immediately with true/false instead of waiting -select pg_try_advisory_lock(hashtext('resource_name')); - --- Use in application -if (acquired) { - -- Do work - select pg_advisory_unlock(hashtext('resource_name')); -} else { - -- Skip or retry later -} -``` - -Reference: [Advisory Locks](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS) diff --git a/.augment/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md b/.augment/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md deleted file mode 100644 index 974da5ed..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Prevent Deadlocks with Consistent Lock Ordering -impact: MEDIUM-HIGH -impactDescription: Eliminate deadlock errors, improve reliability -tags: deadlocks, locking, transactions, ordering ---- - -## Prevent Deadlocks with Consistent Lock Ordering - -Deadlocks occur when transactions lock resources in different orders. Always -acquire locks in a consistent order. - -**Incorrect (inconsistent lock ordering):** - -```sql --- Transaction A -- Transaction B -begin; begin; -update accounts update accounts -set balance = balance - 100 set balance = balance - 50 -where id = 1; where id = 2; -- B locks row 2 - -update accounts update accounts -set balance = balance + 100 set balance = balance + 50 -where id = 2; -- A waits for B where id = 1; -- B waits for A - --- DEADLOCK! Both waiting for each other -``` - -**Correct (lock rows in consistent order first):** - -```sql --- Explicitly acquire locks in ID order before updating -begin; -select * from accounts where id in (1, 2) order by id for update; - --- Now perform updates in any order - locks already held -update accounts set balance = balance - 100 where id = 1; -update accounts set balance = balance + 100 where id = 2; -commit; -``` - -Alternative: use a single statement to update atomically: - -```sql --- Single statement acquires all locks atomically -begin; -update accounts -set balance = balance + case id - when 1 then -100 - when 2 then 100 -end -where id in (1, 2); -commit; -``` - -Detect deadlocks in logs: - -```sql --- Check for recent deadlocks -select * from pg_stat_database where deadlocks > 0; - --- Enable deadlock logging -set log_lock_waits = on; -set deadlock_timeout = '1s'; -``` - -Reference: -[Deadlocks](https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-DEADLOCKS) diff --git a/.augment/skills/supabase-postgres-best-practices/references/lock-short-transactions.md b/.augment/skills/supabase-postgres-best-practices/references/lock-short-transactions.md deleted file mode 100644 index e6b8ef26..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/lock-short-transactions.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Keep Transactions Short to Reduce Lock Contention -impact: MEDIUM-HIGH -impactDescription: 3-5x throughput improvement, fewer deadlocks -tags: transactions, locking, contention, performance ---- - -## Keep Transactions Short to Reduce Lock Contention - -Long-running transactions hold locks that block other queries. Keep transactions as short as possible. 
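Before restructuring code, it helps to confirm which sessions are actually holding transactions open. A small diagnostic sketch using the standard `pg_stat_activity` view (the limit and the "minutes are suspicious" rule of thumb are arbitrary):

```sql
-- Longest-open transactions first; anything measured in minutes deserves a look
select pid,
       now() - xact_start as xact_age,
       state,
       wait_event_type,
       left(query, 80) as current_query
from pg_stat_activity
where xact_start is not null
order by xact_age desc
limit 10;
```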
- -**Incorrect (long transaction with external calls):** - -```sql -begin; -select * from orders where id = 1 for update; -- Lock acquired - --- Application makes HTTP call to payment API (2-5 seconds) --- Other queries on this row are blocked! - -update orders set status = 'paid' where id = 1; -commit; -- Lock held for entire duration -``` - -**Correct (minimal transaction scope):** - -```sql --- Validate data and call APIs outside transaction --- Application: response = await paymentAPI.charge(...) - --- Only hold lock for the actual update -begin; -update orders -set status = 'paid', payment_id = $1 -where id = $2 and status = 'pending' -returning *; -commit; -- Lock held for milliseconds -``` - -Use `statement_timeout` to prevent runaway transactions: - -```sql --- Abort queries running longer than 30 seconds -set statement_timeout = '30s'; - --- Or per-session -set local statement_timeout = '5s'; -``` - -Reference: [Transaction Management](https://www.postgresql.org/docs/current/tutorial-transactions.html) diff --git a/.augment/skills/supabase-postgres-best-practices/references/lock-skip-locked.md b/.augment/skills/supabase-postgres-best-practices/references/lock-skip-locked.md deleted file mode 100644 index 77bdbb97..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/lock-skip-locked.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Use SKIP LOCKED for Non-Blocking Queue Processing -impact: MEDIUM-HIGH -impactDescription: 10x throughput for worker queues -tags: skip-locked, queue, workers, concurrency ---- - -## Use SKIP LOCKED for Non-Blocking Queue Processing - -When multiple workers process a queue, SKIP LOCKED allows workers to process different rows without waiting. - -**Incorrect (workers block each other):** - -```sql --- Worker 1 and Worker 2 both try to get next job -begin; -select * from jobs where status = 'pending' order by created_at limit 1 for update; --- Worker 2 waits for Worker 1's lock to release! -``` - -**Correct (SKIP LOCKED for parallel processing):** - -```sql --- Each worker skips locked rows and gets the next available -begin; -select * from jobs -where status = 'pending' -order by created_at -limit 1 -for update skip locked; - --- Worker 1 gets job 1, Worker 2 gets job 2 (no waiting) - -update jobs set status = 'processing' where id = $1; -commit; -``` - -Complete queue pattern: - -```sql --- Atomic claim-and-update in one statement -update jobs -set status = 'processing', worker_id = $1, started_at = now() -where id = ( - select id from jobs - where status = 'pending' - order by created_at - limit 1 - for update skip locked -) -returning *; -``` - -Reference: [SELECT FOR UPDATE SKIP LOCKED](https://www.postgresql.org/docs/current/sql-select.html#SQL-FOR-UPDATE-SHARE) diff --git a/.augment/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md b/.augment/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md deleted file mode 100644 index 542978c3..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Use EXPLAIN ANALYZE to Diagnose Slow Queries -impact: LOW-MEDIUM -impactDescription: Identify exact bottlenecks in query execution -tags: explain, analyze, diagnostics, query-plan ---- - -## Use EXPLAIN ANALYZE to Diagnose Slow Queries - -EXPLAIN ANALYZE executes the query and shows actual timings, revealing the true performance bottlenecks. 
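One caveat worth adding before the examples below: unlike plain `EXPLAIN`, `EXPLAIN ANALYZE` actually executes the statement. For anything that writes, wrap it in a transaction you roll back; a minimal sketch (table and predicate are illustrative):

```sql
-- The DELETE really runs while being timed; the rollback then undoes it
begin;
explain (analyze, buffers)
delete from orders where status = 'cancelled';
rollback;
```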
- -**Incorrect (guessing at performance issues):** - -```sql --- Query is slow, but why? -select * from orders where customer_id = 123 and status = 'pending'; --- "It must be missing an index" - but which one? -``` - -**Correct (use EXPLAIN ANALYZE):** - -```sql -explain (analyze, buffers, format text) -select * from orders where customer_id = 123 and status = 'pending'; - --- Output reveals the issue: --- Seq Scan on orders (cost=0.00..25000.00 rows=50 width=100) (actual time=0.015..450.123 rows=50 loops=1) --- Filter: ((customer_id = 123) AND (status = 'pending'::text)) --- Rows Removed by Filter: 999950 --- Buffers: shared hit=5000 read=15000 --- Planning Time: 0.150 ms --- Execution Time: 450.500 ms -``` - -Key things to look for: - -```sql --- Seq Scan on large tables = missing index --- Rows Removed by Filter = poor selectivity or missing index --- Buffers: read >> hit = data not cached, needs more memory --- Nested Loop with high loops = consider different join strategy --- Sort Method: external merge = work_mem too low -``` - -Reference: [EXPLAIN](https://supabase.com/docs/guides/database/inspect) diff --git a/.augment/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md b/.augment/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md deleted file mode 100644 index d7e82f1a..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Enable pg_stat_statements for Query Analysis -impact: LOW-MEDIUM -impactDescription: Identify top resource-consuming queries -tags: pg-stat-statements, monitoring, statistics, performance ---- - -## Enable pg_stat_statements for Query Analysis - -pg_stat_statements tracks execution statistics for all queries, helping identify slow and frequent queries. - -**Incorrect (no visibility into query patterns):** - -```sql --- Database is slow, but which queries are the problem? 
--- No way to know without pg_stat_statements -``` - -**Correct (enable and query pg_stat_statements):** - -```sql --- Enable the extension -create extension if not exists pg_stat_statements; - --- Find slowest queries by total time -select - calls, - round(total_exec_time::numeric, 2) as total_time_ms, - round(mean_exec_time::numeric, 2) as mean_time_ms, - query -from pg_stat_statements -order by total_exec_time desc -limit 10; - --- Find most frequent queries -select calls, query -from pg_stat_statements -order by calls desc -limit 10; - --- Reset statistics after optimization -select pg_stat_statements_reset(); -``` - -Key metrics to monitor: - -```sql --- Queries with high mean time (candidates for optimization) -select query, mean_exec_time, calls -from pg_stat_statements -where mean_exec_time > 100 -- > 100ms average -order by mean_exec_time desc; -``` - -Reference: [pg_stat_statements](https://supabase.com/docs/guides/database/extensions/pg_stat_statements) diff --git a/.augment/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md b/.augment/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md deleted file mode 100644 index e0e8ea0b..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Maintain Table Statistics with VACUUM and ANALYZE -impact: MEDIUM -impactDescription: 2-10x better query plans with accurate statistics -tags: vacuum, analyze, statistics, maintenance, autovacuum ---- - -## Maintain Table Statistics with VACUUM and ANALYZE - -Outdated statistics cause the query planner to make poor decisions. VACUUM reclaims space, ANALYZE updates statistics. - -**Incorrect (stale statistics):** - -```sql --- Table has 1M rows but stats say 1000 --- Query planner chooses wrong strategy -explain select * from orders where status = 'pending'; --- Shows: Seq Scan (because stats show small table) --- Actually: Index Scan would be much faster -``` - -**Correct (maintain fresh statistics):** - -```sql --- Manually analyze after large data changes -analyze orders; - --- Analyze specific columns used in WHERE clauses -analyze orders (status, created_at); - --- Check when tables were last analyzed -select - relname, - last_vacuum, - last_autovacuum, - last_analyze, - last_autoanalyze -from pg_stat_user_tables -order by last_analyze nulls first; -``` - -Autovacuum tuning for busy tables: - -```sql --- Increase frequency for high-churn tables -alter table orders set ( - autovacuum_vacuum_scale_factor = 0.05, -- Vacuum at 5% dead tuples (default 20%) - autovacuum_analyze_scale_factor = 0.02 -- Analyze at 2% changes (default 10%) -); - --- Check autovacuum status -select * from pg_stat_progress_vacuum; -``` - -Reference: [VACUUM](https://supabase.com/docs/guides/database/database-size#vacuum-operations) diff --git a/.augment/skills/supabase-postgres-best-practices/references/query-composite-indexes.md b/.augment/skills/supabase-postgres-best-practices/references/query-composite-indexes.md deleted file mode 100644 index fea64523..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/query-composite-indexes.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Create Composite Indexes for Multi-Column Queries -impact: HIGH -impactDescription: 5-10x faster multi-column queries -tags: indexes, composite-index, multi-column, query-optimization ---- - -## Create Composite Indexes for Multi-Column Queries - -When queries filter on multiple columns, a 
composite index is more efficient than separate single-column indexes. - -**Incorrect (separate indexes require bitmap scan):** - -```sql --- Two separate indexes -create index orders_status_idx on orders (status); -create index orders_created_idx on orders (created_at); - --- Query must combine both indexes (slower) -select * from orders where status = 'pending' and created_at > '2024-01-01'; -``` - -**Correct (composite index):** - -```sql --- Single composite index (leftmost column first for equality checks) -create index orders_status_created_idx on orders (status, created_at); - --- Query uses one efficient index scan -select * from orders where status = 'pending' and created_at > '2024-01-01'; -``` - -**Column order matters** - place equality columns first, range columns last: - -```sql --- Good: status (=) before created_at (>) -create index idx on orders (status, created_at); - --- Works for: WHERE status = 'pending' --- Works for: WHERE status = 'pending' AND created_at > '2024-01-01' --- Does NOT work for: WHERE created_at > '2024-01-01' (leftmost prefix rule) -``` - -Reference: [Multicolumn Indexes](https://www.postgresql.org/docs/current/indexes-multicolumn.html) diff --git a/.augment/skills/supabase-postgres-best-practices/references/query-covering-indexes.md b/.augment/skills/supabase-postgres-best-practices/references/query-covering-indexes.md deleted file mode 100644 index 9d2a4947..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/query-covering-indexes.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Use Covering Indexes to Avoid Table Lookups -impact: MEDIUM-HIGH -impactDescription: 2-5x faster queries by eliminating heap fetches -tags: indexes, covering-index, include, index-only-scan ---- - -## Use Covering Indexes to Avoid Table Lookups - -Covering indexes include all columns needed by a query, enabling index-only scans that skip the table entirely. - -**Incorrect (index scan + heap fetch):** - -```sql -create index users_email_idx on users (email); - --- Must fetch name and created_at from table heap -select email, name, created_at from users where email = 'user@example.com'; -``` - -**Correct (index-only scan with INCLUDE):** - -```sql --- Include non-searchable columns in the index -create index users_email_idx on users (email) include (name, created_at); - --- All columns served from index, no table access needed -select email, name, created_at from users where email = 'user@example.com'; -``` - -Use INCLUDE for columns you SELECT but don't filter on: - -```sql --- Searching by status, but also need customer_id and total -create index orders_status_idx on orders (status) include (customer_id, total); - -select status, customer_id, total from orders where status = 'shipped'; -``` - -Reference: [Index-Only Scans](https://www.postgresql.org/docs/current/indexes-index-only-scans.html) diff --git a/.augment/skills/supabase-postgres-best-practices/references/query-index-types.md b/.augment/skills/supabase-postgres-best-practices/references/query-index-types.md deleted file mode 100644 index 93b32590..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/query-index-types.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Choose the Right Index Type for Your Data -impact: HIGH -impactDescription: 10-100x improvement with correct index type -tags: indexes, btree, gin, gist, brin, hash, index-types ---- - -## Choose the Right Index Type for Your Data - -Different index types excel at different query patterns. 
The default B-tree isn't always optimal. - -**Incorrect (B-tree for JSONB containment):** - -```sql --- B-tree cannot optimize containment operators -create index products_attrs_idx on products (attributes); -select * from products where attributes @> '{"color": "red"}'; --- Full table scan - B-tree doesn't support @> operator -``` - -**Correct (GIN for JSONB):** - -```sql --- GIN supports @>, ?, ?&, ?| operators -create index products_attrs_idx on products using gin (attributes); -select * from products where attributes @> '{"color": "red"}'; -``` - -Index type guide: - -```sql --- B-tree (default): =, <, >, BETWEEN, IN, IS NULL -create index users_created_idx on users (created_at); - --- GIN: arrays, JSONB, full-text search -create index posts_tags_idx on posts using gin (tags); - --- GiST: geometric data, range types, nearest-neighbor (KNN) queries -create index locations_idx on places using gist (location); - --- BRIN: large time-series tables (10-100x smaller) -create index events_time_idx on events using brin (created_at); - --- Hash: equality-only (slightly faster than B-tree for =) -create index sessions_token_idx on sessions using hash (token); -``` - -Reference: [Index Types](https://www.postgresql.org/docs/current/indexes-types.html) diff --git a/.augment/skills/supabase-postgres-best-practices/references/query-missing-indexes.md b/.augment/skills/supabase-postgres-best-practices/references/query-missing-indexes.md deleted file mode 100644 index e6daace7..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/query-missing-indexes.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Add Indexes on WHERE and JOIN Columns -impact: CRITICAL -impactDescription: 100-1000x faster queries on large tables -tags: indexes, performance, sequential-scan, query-optimization ---- - -## Add Indexes on WHERE and JOIN Columns - -Queries filtering or joining on unindexed columns cause full table scans, which become exponentially slower as tables grow. 
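A quick way to find candidates before adding indexes: `pg_stat_user_tables` tracks how often each table is read by sequential scan. A minimal diagnostic sketch (how you interpret the numbers is up to you):

```sql
-- Tables with the most rows read via sequential scans are the first suspects
select relname,
       seq_scan,
       seq_tup_read,
       idx_scan
from pg_stat_user_tables
order by seq_tup_read desc
limit 10;
```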
- -**Incorrect (sequential scan on large table):** - -```sql --- No index on customer_id causes full table scan -select * from orders where customer_id = 123; - --- EXPLAIN shows: Seq Scan on orders (cost=0.00..25000.00 rows=100 width=85) -``` - -**Correct (index scan):** - -```sql --- Create index on frequently filtered column -create index orders_customer_id_idx on orders (customer_id); - -select * from orders where customer_id = 123; - --- EXPLAIN shows: Index Scan using orders_customer_id_idx (cost=0.42..8.44 rows=100 width=85) -``` - -For JOIN columns, always index the foreign key side: - -```sql --- Index the referencing column -create index orders_customer_id_idx on orders (customer_id); - -select c.name, o.total -from customers c -join orders o on o.customer_id = c.id; -``` - -Reference: [Query Optimization](https://supabase.com/docs/guides/database/query-optimization) diff --git a/.augment/skills/supabase-postgres-best-practices/references/query-partial-indexes.md b/.augment/skills/supabase-postgres-best-practices/references/query-partial-indexes.md deleted file mode 100644 index 3e61a341..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/query-partial-indexes.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Use Partial Indexes for Filtered Queries -impact: HIGH -impactDescription: 5-20x smaller indexes, faster writes and queries -tags: indexes, partial-index, query-optimization, storage ---- - -## Use Partial Indexes for Filtered Queries - -Partial indexes only include rows matching a WHERE condition, making them smaller and faster when queries consistently filter on the same condition. - -**Incorrect (full index includes irrelevant rows):** - -```sql --- Index includes all rows, even soft-deleted ones -create index users_email_idx on users (email); - --- Query always filters active users -select * from users where email = 'user@example.com' and deleted_at is null; -``` - -**Correct (partial index matches query filter):** - -```sql --- Index only includes active users -create index users_active_email_idx on users (email) -where deleted_at is null; - --- Query uses the smaller, faster index -select * from users where email = 'user@example.com' and deleted_at is null; -``` - -Common use cases for partial indexes: - -```sql --- Only pending orders (status rarely changes once completed) -create index orders_pending_idx on orders (created_at) -where status = 'pending'; - --- Only non-null values -create index products_sku_idx on products (sku) -where sku is not null; -``` - -Reference: [Partial Indexes](https://www.postgresql.org/docs/current/indexes-partial.html) diff --git a/.augment/skills/supabase-postgres-best-practices/references/schema-constraints.md b/.augment/skills/supabase-postgres-best-practices/references/schema-constraints.md deleted file mode 100644 index 1d2ef8f9..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/schema-constraints.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Add Constraints Safely in Migrations -impact: HIGH -impactDescription: Prevents migration failures and enables idempotent schema changes -tags: constraints, migrations, schema, alter-table ---- - -## Add Constraints Safely in Migrations - -PostgreSQL does not support `ADD CONSTRAINT IF NOT EXISTS`. Migrations using this syntax will fail. 
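A simpler idempotent alternative, when briefly dropping the constraint is acceptable, is `DROP CONSTRAINT IF EXISTS` followed by `ADD CONSTRAINT`. Note that for unique constraints this rebuilds the underlying index on every run, so the `DO` block pattern shown below is usually the safer default. A sketch reusing the constraint name from the examples below:

```sql
-- Idempotent, but re-creates the unique index each time the migration runs
alter table public.profiles
  drop constraint if exists profiles_birthchart_id_unique;

alter table public.profiles
  add constraint profiles_birthchart_id_unique unique (birthchart_id);
```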
- -**Incorrect (causes syntax error):** - -```sql --- ERROR: syntax error at or near "not" (SQLSTATE 42601) -alter table public.profiles -add constraint if not exists profiles_birthchart_id_unique unique (birthchart_id); -``` - -**Correct (idempotent constraint creation):** - -```sql --- Use DO block to check before adding -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'profiles_birthchart_id_unique' - and conrelid = 'public.profiles'::regclass - ) then - alter table public.profiles - add constraint profiles_birthchart_id_unique unique (birthchart_id); - end if; -end $$; -``` - -For all constraint types: - -```sql --- Check constraints -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'check_age_positive' - ) then - alter table users add constraint check_age_positive check (age > 0); - end if; -end $$; - --- Foreign keys -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'profiles_birthchart_id_fkey' - ) then - alter table profiles - add constraint profiles_birthchart_id_fkey - foreign key (birthchart_id) references birthcharts(id); - end if; -end $$; -``` - -Check if constraint exists: - -```sql --- Query to check constraint existence -select conname, contype, pg_get_constraintdef(oid) -from pg_constraint -where conrelid = 'public.profiles'::regclass; - --- contype values: --- 'p' = PRIMARY KEY --- 'f' = FOREIGN KEY --- 'u' = UNIQUE --- 'c' = CHECK -``` - -Reference: [Constraints](https://www.postgresql.org/docs/current/ddl-constraints.html) diff --git a/.augment/skills/supabase-postgres-best-practices/references/schema-data-types.md b/.augment/skills/supabase-postgres-best-practices/references/schema-data-types.md deleted file mode 100644 index f253a581..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/schema-data-types.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Choose Appropriate Data Types -impact: HIGH -impactDescription: 50% storage reduction, faster comparisons -tags: data-types, schema, storage, performance ---- - -## Choose Appropriate Data Types - -Using the right data types reduces storage, improves query performance, and prevents bugs. 
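The guidelines further down mention using `text` with a check constraint for enum-like values but do not show it; a minimal sketch (the status values are illustrative):

```sql
-- Constrained text: readable values, easy to extend by altering the constraint later
create table orders (
  id     bigint generated always as identity primary key,
  status text not null default 'pending'
    check (status in ('pending', 'paid', 'shipped', 'cancelled'))
);
```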
- -**Incorrect (wrong data types):** - -```sql -create table users ( - id int, -- Will overflow at 2.1 billion - email varchar(255), -- Unnecessary length limit - created_at timestamp, -- Missing timezone info - is_active varchar(5), -- String for boolean - price varchar(20) -- String for numeric -); -``` - -**Correct (appropriate data types):** - -```sql -create table users ( - id bigint generated always as identity primary key, -- 9 quintillion max - email text, -- No artificial limit, same performance as varchar - created_at timestamptz, -- Always store timezone-aware timestamps - is_active boolean default true, -- 1 byte vs variable string length - price numeric(10,2) -- Exact decimal arithmetic -); -``` - -Key guidelines: - -```sql --- IDs: use bigint, not int (future-proofing) --- Strings: use text, not varchar(n) unless constraint needed --- Time: use timestamptz, not timestamp --- Money: use numeric, not float (precision matters) --- Enums: use text with check constraint or create enum type -``` - -Reference: [Data Types](https://www.postgresql.org/docs/current/datatype.html) diff --git a/.augment/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md b/.augment/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md deleted file mode 100644 index 6c3d6ff6..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Index Foreign Key Columns -impact: HIGH -impactDescription: 10-100x faster JOINs and CASCADE operations -tags: foreign-key, indexes, joins, schema ---- - -## Index Foreign Key Columns - -Postgres does not automatically index foreign key columns. Missing indexes cause slow JOINs and CASCADE operations. - -**Incorrect (unindexed foreign key):** - -```sql -create table orders ( - id bigint generated always as identity primary key, - customer_id bigint references customers(id) on delete cascade, - total numeric(10,2) -); - --- No index on customer_id! 
--- JOINs and ON DELETE CASCADE both require full table scan -select * from orders where customer_id = 123; -- Seq Scan -delete from customers where id = 123; -- Locks table, scans all orders -``` - -**Correct (indexed foreign key):** - -```sql -create table orders ( - id bigint generated always as identity primary key, - customer_id bigint references customers(id) on delete cascade, - total numeric(10,2) -); - --- Always index the FK column -create index orders_customer_id_idx on orders (customer_id); - --- Now JOINs and cascades are fast -select * from orders where customer_id = 123; -- Index Scan -delete from customers where id = 123; -- Uses index, fast cascade -``` - -Find missing FK indexes: - -```sql -select - conrelid::regclass as table_name, - a.attname as fk_column -from pg_constraint c -join pg_attribute a on a.attrelid = c.conrelid and a.attnum = any(c.conkey) -where c.contype = 'f' - and not exists ( - select 1 from pg_index i - where i.indrelid = c.conrelid and a.attnum = any(i.indkey) - ); -``` - -Reference: [Foreign Keys](https://www.postgresql.org/docs/current/ddl-constraints.html#DDL-CONSTRAINTS-FK) diff --git a/.augment/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md b/.augment/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md deleted file mode 100644 index f0072940..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Use Lowercase Identifiers for Compatibility -impact: MEDIUM -impactDescription: Avoid case-sensitivity bugs with tools, ORMs, and AI assistants -tags: naming, identifiers, case-sensitivity, schema, conventions ---- - -## Use Lowercase Identifiers for Compatibility - -PostgreSQL folds unquoted identifiers to lowercase. Quoted mixed-case identifiers require quotes forever and cause issues with tools, ORMs, and AI assistants that may not recognize them. 
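To gauge how much of an existing schema is affected, you can list identifiers that would require quoting. A minimal sketch against `information_schema`, scoped to the `public` schema as an assumption:

```sql
-- Tables and columns whose names are not already lowercase
select table_name, column_name
from information_schema.columns
where table_schema = 'public'
  and (table_name <> lower(table_name)
       or column_name <> lower(column_name));
```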
- -**Incorrect (mixed-case identifiers):** - -```sql --- Quoted identifiers preserve case but require quotes everywhere -CREATE TABLE "Users" ( - "userId" bigint PRIMARY KEY, - "firstName" text, - "lastName" text -); - --- Must always quote or queries fail -SELECT "firstName" FROM "Users" WHERE "userId" = 1; - --- This fails - Users becomes users without quotes -SELECT firstName FROM Users; --- ERROR: relation "users" does not exist -``` - -**Correct (lowercase snake_case):** - -```sql --- Unquoted lowercase identifiers are portable and tool-friendly -CREATE TABLE users ( - user_id bigint PRIMARY KEY, - first_name text, - last_name text -); - --- Works without quotes, recognized by all tools -SELECT first_name FROM users WHERE user_id = 1; -``` - -Common sources of mixed-case identifiers: - -```sql --- ORMs often generate quoted camelCase - configure them to use snake_case --- Migrations from other databases may preserve original casing --- Some GUI tools quote identifiers by default - disable this - --- If stuck with mixed-case, create views as a compatibility layer -CREATE VIEW users AS SELECT "userId" AS user_id, "firstName" AS first_name FROM "Users"; -``` - -Reference: [Identifiers and Key Words](https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS) diff --git a/.augment/skills/supabase-postgres-best-practices/references/schema-partitioning.md b/.augment/skills/supabase-postgres-best-practices/references/schema-partitioning.md deleted file mode 100644 index 13137a03..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/schema-partitioning.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Partition Large Tables for Better Performance -impact: MEDIUM-HIGH -impactDescription: 5-20x faster queries and maintenance on large tables -tags: partitioning, large-tables, time-series, performance ---- - -## Partition Large Tables for Better Performance - -Partitioning splits a large table into smaller pieces, improving query performance and maintenance operations. 
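One detail the examples below do not cover: if a row arrives for which no partition exists, the insert fails outright. A `DEFAULT` partition (Postgres 11+) catches out-of-range rows while you create the next period's partition; a sketch assuming the partitioned `events` table from the correct example below:

```sql
-- Rows outside every defined range land here instead of raising an error
create table events_default partition of events default;
```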
- -**Incorrect (single large table):** - -```sql -create table events ( - id bigint generated always as identity, - created_at timestamptz, - data jsonb -); - --- 500M rows, queries scan everything -select * from events where created_at > '2024-01-01'; -- Slow -vacuum events; -- Takes hours, locks table -``` - -**Correct (partitioned by time range):** - -```sql -create table events ( - id bigint generated always as identity, - created_at timestamptz not null, - data jsonb -) partition by range (created_at); - --- Create partitions for each month -create table events_2024_01 partition of events - for values from ('2024-01-01') to ('2024-02-01'); - -create table events_2024_02 partition of events - for values from ('2024-02-01') to ('2024-03-01'); - --- Queries only scan relevant partitions -select * from events where created_at > '2024-01-15'; -- Only scans events_2024_01+ - --- Drop old data instantly -drop table events_2023_01; -- Instant vs DELETE taking hours -``` - -When to partition: - -- Tables > 100M rows -- Time-series data with date-based queries -- Need to efficiently drop old data - -Reference: [Table Partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) diff --git a/.augment/skills/supabase-postgres-best-practices/references/schema-primary-keys.md b/.augment/skills/supabase-postgres-best-practices/references/schema-primary-keys.md deleted file mode 100644 index fb0fbb16..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/schema-primary-keys.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Select Optimal Primary Key Strategy -impact: HIGH -impactDescription: Better index locality, reduced fragmentation -tags: primary-key, identity, uuid, serial, schema ---- - -## Select Optimal Primary Key Strategy - -Primary key choice affects insert performance, index size, and replication -efficiency. 
- -**Incorrect (problematic PK choices):** - -```sql --- identity is the SQL-standard approach -create table users ( - id serial primary key -- Works, but IDENTITY is recommended -); - --- Random UUIDs (v4) cause index fragmentation -create table orders ( - id uuid default gen_random_uuid() primary key -- UUIDv4 = random = scattered inserts -); -``` - -**Correct (optimal PK strategies):** - -```sql --- Use IDENTITY for sequential IDs (SQL-standard, best for most cases) -create table users ( - id bigint generated always as identity primary key -); - --- For distributed systems needing UUIDs, use UUIDv7 (time-ordered) --- Requires pg_uuidv7 extension: create extension pg_uuidv7; -create table orders ( - id uuid default uuid_generate_v7() primary key -- Time-ordered, no fragmentation -); - --- Alternative: time-prefixed IDs for sortable, distributed IDs (no extension needed) -create table events ( - id text default concat( - to_char(now() at time zone 'utc', 'YYYYMMDDHH24MISSMS'), - gen_random_uuid()::text - ) primary key -); -``` - -Guidelines: - -- Single database: `bigint identity` (sequential, 8 bytes, SQL-standard) -- Distributed/exposed IDs: UUIDv7 (requires pg_uuidv7) or ULID (time-ordered, no - fragmentation) -- `serial` works but `identity` is SQL-standard and preferred for new - applications -- Avoid random UUIDs (v4) as primary keys on large tables (causes index - fragmentation) - -Reference: -[Identity Columns](https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-PARMS-GENERATED-IDENTITY) diff --git a/.augment/skills/supabase-postgres-best-practices/references/security-privileges.md b/.augment/skills/supabase-postgres-best-practices/references/security-privileges.md deleted file mode 100644 index 448ec345..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/security-privileges.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Apply Principle of Least Privilege -impact: MEDIUM -impactDescription: Reduced attack surface, better audit trail -tags: privileges, security, roles, permissions ---- - -## Apply Principle of Least Privilege - -Grant only the minimum permissions required. Never use superuser for application queries. 
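One gotcha worth noting alongside the examples below: `GRANT ... ON ALL TABLES` only covers tables that already exist. To keep newly created tables covered, also set default privileges when running as the role that creates them (the role names here match the examples below and are otherwise assumptions):

```sql
-- Tables created later by this role in schema public become readable by app_readonly
alter default privileges in schema public
  grant select on tables to app_readonly;
```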
- -**Incorrect (overly broad permissions):** - -```sql --- Application uses superuser connection --- Or grants ALL to application role -grant all privileges on all tables in schema public to app_user; -grant all privileges on all sequences in schema public to app_user; - --- Any SQL injection becomes catastrophic --- drop table users; cascades to everything -``` - -**Correct (minimal, specific grants):** - -```sql --- Create role with no default privileges -create role app_readonly nologin; - --- Grant only SELECT on specific tables -grant usage on schema public to app_readonly; -grant select on public.products, public.categories to app_readonly; - --- Create role for writes with limited scope -create role app_writer nologin; -grant usage on schema public to app_writer; -grant select, insert, update on public.orders to app_writer; -grant usage on sequence orders_id_seq to app_writer; --- No DELETE permission - --- Login role inherits from these -create role app_user login password 'xxx'; -grant app_writer to app_user; -``` - -Revoke public defaults: - -```sql --- Revoke default public access -revoke all on schema public from public; -revoke all on all tables in schema public from public; -``` - -Reference: [Roles and Privileges](https://supabase.com/blog/postgres-roles-and-privileges) diff --git a/.augment/skills/supabase-postgres-best-practices/references/security-rls-basics.md b/.augment/skills/supabase-postgres-best-practices/references/security-rls-basics.md deleted file mode 100644 index c61e1a85..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/security-rls-basics.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Enable Row Level Security for Multi-Tenant Data -impact: CRITICAL -impactDescription: Database-enforced tenant isolation, prevent data leaks -tags: rls, row-level-security, multi-tenant, security ---- - -## Enable Row Level Security for Multi-Tenant Data - -Row Level Security (RLS) enforces data access at the database level, ensuring users only see their own data. - -**Incorrect (application-level filtering only):** - -```sql --- Relying only on application to filter -select * from orders where user_id = $current_user_id; - --- Bug or bypass means all data is exposed! 
-select * from orders; -- Returns ALL orders -``` - -**Correct (database-enforced RLS):** - -```sql --- Enable RLS on the table -alter table orders enable row level security; - --- Create policy for users to see only their orders -create policy orders_user_policy on orders - for all - using (user_id = current_setting('app.current_user_id')::bigint); - --- Force RLS even for table owners -alter table orders force row level security; - --- Set user context and query -set app.current_user_id = '123'; -select * from orders; -- Only returns orders for user 123 -``` - -Policy for authenticated role: - -```sql -create policy orders_user_policy on orders - for all - to authenticated - using (user_id = auth.uid()); -``` - -Reference: [Row Level Security](https://supabase.com/docs/guides/database/postgres/row-level-security) diff --git a/.augment/skills/supabase-postgres-best-practices/references/security-rls-performance.md b/.augment/skills/supabase-postgres-best-practices/references/security-rls-performance.md deleted file mode 100644 index b32d92f7..00000000 --- a/.augment/skills/supabase-postgres-best-practices/references/security-rls-performance.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Optimize RLS Policies for Performance -impact: HIGH -impactDescription: 5-10x faster RLS queries with proper patterns -tags: rls, performance, security, optimization ---- - -## Optimize RLS Policies for Performance - -Poorly written RLS policies can cause severe performance issues. Use subqueries and indexes strategically. - -**Incorrect (function called for every row):** - -```sql -create policy orders_policy on orders - using (auth.uid() = user_id); -- auth.uid() called per row! - --- With 1M rows, auth.uid() is called 1M times -``` - -**Correct (wrap functions in SELECT):** - -```sql -create policy orders_policy on orders - using ((select auth.uid()) = user_id); -- Called once, cached - --- 100x+ faster on large tables -``` - -Use security definer functions for complex checks: - -```sql --- Create helper function (runs as definer, bypasses RLS) -create or replace function is_team_member(team_id bigint) -returns boolean -language sql -security definer -set search_path = '' -as $$ - select exists ( - select 1 from public.team_members - where team_id = $1 and user_id = (select auth.uid()) - ); -$$; - --- Use in policy (indexed lookup, not per-row check) -create policy team_orders_policy on orders - using ((select is_team_member(team_id))); -``` - -Always add indexes on columns used in RLS policies: - -```sql -create index orders_user_id_idx on orders (user_id); -``` - -Reference: [RLS Performance](https://supabase.com/docs/guides/database/postgres/row-level-security#rls-performance-recommendations) diff --git a/.claude/skills/supabase-postgres-best-practices/AGENTS.md b/.claude/skills/supabase-postgres-best-practices/AGENTS.md deleted file mode 100644 index a7baf445..00000000 --- a/.claude/skills/supabase-postgres-best-practices/AGENTS.md +++ /dev/null @@ -1,68 +0,0 @@ -# Supabase Postgres Best Practices - -## Structure - -``` -supabase-postgres-best-practices/ - SKILL.md # Main skill file - read this first - AGENTS.md # This navigation guide - CLAUDE.md # Symlink to AGENTS.md - references/ # Detailed reference files -``` - -## Usage - -1. Read `SKILL.md` for the main skill instructions -2. Browse `references/` for detailed documentation on specific topics -3. Reference files are loaded on-demand - read only what you need - -Comprehensive performance optimization guide for Postgres, maintained by Supabase. 
Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. - -## When to Apply - -Reference these guidelines when: -- Writing SQL queries or designing schemas -- Implementing indexes or query optimization -- Reviewing database performance issues -- Configuring connection pooling or scaling -- Optimizing for Postgres-specific features -- Working with Row-Level Security (RLS) - -## Rule Categories by Priority - -| Priority | Category | Impact | Prefix | -|----------|----------|--------|--------| -| 1 | Query Performance | CRITICAL | `query-` | -| 2 | Connection Management | CRITICAL | `conn-` | -| 3 | Security & RLS | CRITICAL | `security-` | -| 4 | Schema Design | HIGH | `schema-` | -| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | -| 6 | Data Access Patterns | MEDIUM | `data-` | -| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | -| 8 | Advanced Features | LOW | `advanced-` | - -## How to Use - -Read individual rule files for detailed explanations and SQL examples: - -``` -references/query-missing-indexes.md -references/schema-partial-indexes.md -references/_sections.md -``` - -Each rule file contains: -- Brief explanation of why it matters -- Incorrect SQL example with explanation -- Correct SQL example with explanation -- Optional EXPLAIN output or metrics -- Additional context and references -- Supabase-specific notes (when applicable) - -## References - -- https://www.postgresql.org/docs/current/ -- https://supabase.com/docs -- https://wiki.postgresql.org/wiki/Performance_Optimization -- https://supabase.com/docs/guides/database/overview -- https://supabase.com/docs/guides/auth/row-level-security diff --git a/.claude/skills/supabase-postgres-best-practices/CLAUDE.md b/.claude/skills/supabase-postgres-best-practices/CLAUDE.md deleted file mode 100644 index 47dc3e3d..00000000 --- a/.claude/skills/supabase-postgres-best-practices/CLAUDE.md +++ /dev/null @@ -1 +0,0 @@ -AGENTS.md \ No newline at end of file diff --git a/.claude/skills/supabase-postgres-best-practices/README.md b/.claude/skills/supabase-postgres-best-practices/README.md deleted file mode 100644 index f1a374e1..00000000 --- a/.claude/skills/supabase-postgres-best-practices/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# Supabase Postgres Best Practices - Contributor Guide - -This skill contains Postgres performance optimization references optimized for -AI agents and LLMs. It follows the [Agent Skills Open Standard](https://agentskills.io/). - -## Quick Start - -```bash -# From repository root -npm install - -# Validate existing references -npm run validate - -# Build AGENTS.md -npm run build -``` - -## Creating a New Reference - -1. **Choose a section prefix** based on the category: - - `query-` Query Performance (CRITICAL) - - `conn-` Connection Management (CRITICAL) - - `security-` Security & RLS (CRITICAL) - - `schema-` Schema Design (HIGH) - - `lock-` Concurrency & Locking (MEDIUM-HIGH) - - `data-` Data Access Patterns (MEDIUM) - - `monitor-` Monitoring & Diagnostics (LOW-MEDIUM) - - `advanced-` Advanced Features (LOW) - -2. **Copy the template**: - ```bash - cp references/_template.md references/query-your-reference-name.md - ``` - -3. **Fill in the content** following the template structure - -4. **Validate and build**: - ```bash - npm run validate - npm run build - ``` - -5. 
**Review** the generated `AGENTS.md` - -## Skill Structure - -``` -skills/supabase-postgres-best-practices/ -β”œβ”€β”€ SKILL.md # Agent-facing skill manifest (Agent Skills spec) -β”œβ”€β”€ AGENTS.md # [GENERATED] Compiled references document -β”œβ”€β”€ README.md # This file -└── references/ - β”œβ”€β”€ _template.md # Reference template - β”œβ”€β”€ _sections.md # Section definitions - β”œβ”€β”€ _contributing.md # Writing guidelines - └── *.md # Individual references - -packages/skills-build/ -β”œβ”€β”€ src/ # Generic build system source -└── package.json # NPM scripts -``` - -## Reference File Structure - -See `references/_template.md` for the complete template. Key elements: - -````markdown ---- -title: Clear, Action-Oriented Title -impact: CRITICAL|HIGH|MEDIUM-HIGH|MEDIUM|LOW-MEDIUM|LOW -impactDescription: Quantified benefit (e.g., "10-100x faster") -tags: relevant, keywords ---- - -## [Title] - -[1-2 sentence explanation] - -**Incorrect (description):** - -```sql --- Comment explaining what's wrong -[Bad SQL example] -``` -```` - -**Correct (description):** - -```sql --- Comment explaining why this is better -[Good SQL example] -``` - -``` -## Writing Guidelines - -See `references/_contributing.md` for detailed guidelines. Key principles: - -1. **Show concrete transformations** - "Change X to Y", not abstract advice -2. **Error-first structure** - Show the problem before the solution -3. **Quantify impact** - Include specific metrics (10x faster, 50% smaller) -4. **Self-contained examples** - Complete, runnable SQL -5. **Semantic naming** - Use meaningful names (users, email), not (table1, col1) - -## Impact Levels - -| Level | Improvement | Examples | -|-------|-------------|----------| -| CRITICAL | 10-100x | Missing indexes, connection exhaustion | -| HIGH | 5-20x | Wrong index types, poor partitioning | -| MEDIUM-HIGH | 2-5x | N+1 queries, RLS optimization | -| MEDIUM | 1.5-3x | Redundant indexes, stale statistics | -| LOW-MEDIUM | 1.2-2x | VACUUM tuning, config tweaks | -| LOW | Incremental | Advanced patterns, edge cases | -``` diff --git a/.claude/skills/supabase-postgres-best-practices/SKILL.md b/.claude/skills/supabase-postgres-best-practices/SKILL.md deleted file mode 100644 index f80be156..00000000 --- a/.claude/skills/supabase-postgres-best-practices/SKILL.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -name: supabase-postgres-best-practices -description: Postgres performance optimization and best practices from Supabase. Use this skill when writing, reviewing, or optimizing Postgres queries, schema designs, or database configurations. -license: MIT -metadata: - author: supabase - version: "1.1.0" - organization: Supabase - date: January 2026 - abstract: Comprehensive Postgres performance optimization guide for developers using Supabase and Postgres. Contains performance rules across 8 categories, prioritized by impact from critical (query performance, connection management) to incremental (advanced features). Each rule includes detailed explanations, incorrect vs. correct SQL examples, query plan analysis, and specific performance metrics to guide automated optimization and code generation. ---- - -# Supabase Postgres Best Practices - -Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. 
- -## When to Apply - -Reference these guidelines when: -- Writing SQL queries or designing schemas -- Implementing indexes or query optimization -- Reviewing database performance issues -- Configuring connection pooling or scaling -- Optimizing for Postgres-specific features -- Working with Row-Level Security (RLS) - -## Rule Categories by Priority - -| Priority | Category | Impact | Prefix | -|----------|----------|--------|--------| -| 1 | Query Performance | CRITICAL | `query-` | -| 2 | Connection Management | CRITICAL | `conn-` | -| 3 | Security & RLS | CRITICAL | `security-` | -| 4 | Schema Design | HIGH | `schema-` | -| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | -| 6 | Data Access Patterns | MEDIUM | `data-` | -| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | -| 8 | Advanced Features | LOW | `advanced-` | - -## How to Use - -Read individual rule files for detailed explanations and SQL examples: - -``` -references/query-missing-indexes.md -references/schema-partial-indexes.md -references/_sections.md -``` - -Each rule file contains: -- Brief explanation of why it matters -- Incorrect SQL example with explanation -- Correct SQL example with explanation -- Optional EXPLAIN output or metrics -- Additional context and references -- Supabase-specific notes (when applicable) - -## References - -- https://www.postgresql.org/docs/current/ -- https://supabase.com/docs -- https://wiki.postgresql.org/wiki/Performance_Optimization -- https://supabase.com/docs/guides/database/overview -- https://supabase.com/docs/guides/auth/row-level-security diff --git a/.claude/skills/supabase-postgres-best-practices/references/_contributing.md b/.claude/skills/supabase-postgres-best-practices/references/_contributing.md deleted file mode 100644 index 10de8ecb..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/_contributing.md +++ /dev/null @@ -1,171 +0,0 @@ -# Writing Guidelines for Postgres References - -This document provides guidelines for creating effective Postgres best -practice references that work well with AI agents and LLMs. - -## Key Principles - -### 1. Concrete Transformation Patterns - -Show exact SQL rewrites. Avoid philosophical advice. - -**Good:** "Use `WHERE id = ANY(ARRAY[...])` instead of -`WHERE id IN (SELECT ...)`" **Bad:** "Design good schemas" - -### 2. Error-First Structure - -Always show the problematic pattern first, then the solution. This trains agents -to recognize anti-patterns. - -```markdown -**Incorrect (sequential queries):** [bad example] - -**Correct (batched query):** [good example] -``` - -### 3. Quantified Impact - -Include specific metrics. Helps agents prioritize fixes. - -**Good:** "10x faster queries", "50% smaller index", "Eliminates N+1" -**Bad:** "Faster", "Better", "More efficient" - -### 4. Self-Contained Examples - -Examples should be complete and runnable (or close to it). Include `CREATE TABLE` -if context is needed. - -```sql --- Include table definition when needed for clarity -CREATE TABLE users ( - id bigint PRIMARY KEY, - email text NOT NULL, - deleted_at timestamptz -); - --- Now show the index -CREATE INDEX users_active_email_idx ON users(email) WHERE deleted_at IS NULL; -``` - -### 5. Semantic Naming - -Use meaningful table/column names. Names carry intent for LLMs. 
- -**Good:** `users`, `email`, `created_at`, `is_active` -**Bad:** `table1`, `col1`, `field`, `flag` - ---- - -## Code Example Standards - -### SQL Formatting - -```sql --- Use lowercase keywords, clear formatting -CREATE INDEX CONCURRENTLY users_email_idx - ON users(email) - WHERE deleted_at IS NULL; - --- Not cramped or ALL CAPS -CREATE INDEX CONCURRENTLY USERS_EMAIL_IDX ON USERS(EMAIL) WHERE DELETED_AT IS NULL; -``` - -### Comments - -- Explain _why_, not _what_ -- Highlight performance implications -- Point out common pitfalls - -### Language Tags - -- `sql` - Standard SQL queries -- `plpgsql` - Stored procedures/functions -- `typescript` - Application code (when needed) -- `python` - Application code (when needed) - ---- - -## When to Include Application Code - -**Default: SQL Only** - -Most references should focus on pure SQL patterns. This keeps examples portable. - -**Include Application Code When:** - -- Connection pooling configuration -- Transaction management in application context -- ORM anti-patterns (N+1 in Prisma/TypeORM) -- Prepared statement usage - -**Format for Mixed Examples:** - -````markdown -**Incorrect (N+1 in application):** - -```typescript -for (const user of users) { - const posts = await db.query("SELECT * FROM posts WHERE user_id = $1", [ - user.id, - ]); -} -``` -```` - -**Correct (batch query):** - -```typescript -const posts = await db.query("SELECT * FROM posts WHERE user_id = ANY($1)", [ - userIds, -]); -``` - ---- - -## Impact Level Guidelines - -| Level | Improvement | Use When | -|-------|-------------|----------| -| **CRITICAL** | 10-100x | Missing indexes, connection exhaustion, sequential scans on large tables | -| **HIGH** | 5-20x | Wrong index types, poor partitioning, missing covering indexes | -| **MEDIUM-HIGH** | 2-5x | N+1 queries, inefficient pagination, RLS optimization | -| **MEDIUM** | 1.5-3x | Redundant indexes, query plan instability | -| **LOW-MEDIUM** | 1.2-2x | VACUUM tuning, configuration tweaks | -| **LOW** | Incremental | Advanced patterns, edge cases | - ---- - -## Reference Standards - -**Primary Sources:** - -- Official Postgres documentation -- Supabase documentation -- Postgres wiki -- Established blogs (2ndQuadrant, Crunchy Data) - -**Format:** - -```markdown -Reference: -[Postgres Indexes](https://www.postgresql.org/docs/current/indexes.html) -``` - ---- - -## Review Checklist - -Before submitting a reference: - -- [ ] Title is clear and action-oriented -- [ ] Impact level matches the performance gain -- [ ] impactDescription includes quantification -- [ ] Explanation is concise (1-2 sentences) -- [ ] Has at least 1 **Incorrect** SQL example -- [ ] Has at least 1 **Correct** SQL example -- [ ] SQL uses semantic naming -- [ ] Comments explain _why_, not _what_ -- [ ] Trade-offs mentioned if applicable -- [ ] Reference links included -- [ ] `npm run validate` passes -- [ ] `npm run build` generates correct output diff --git a/.claude/skills/supabase-postgres-best-practices/references/_sections.md b/.claude/skills/supabase-postgres-best-practices/references/_sections.md deleted file mode 100644 index 8ba57c23..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/_sections.md +++ /dev/null @@ -1,39 +0,0 @@ -# Section Definitions - -This file defines the rule categories for Postgres best practices. Rules are automatically assigned to sections based on their filename prefix. - -Take the examples below as pure demonstrative. Replace each section with the actual rule categories for Postgres best practices. 
- ---- - -## 1. Query Performance (query) -**Impact:** CRITICAL -**Description:** Slow queries, missing indexes, inefficient query plans. The most common source of Postgres performance issues. - -## 2. Connection Management (conn) -**Impact:** CRITICAL -**Description:** Connection pooling, limits, and serverless strategies. Critical for applications with high concurrency or serverless deployments. - -## 3. Security & RLS (security) -**Impact:** CRITICAL -**Description:** Row-Level Security policies, privilege management, and authentication patterns. - -## 4. Schema Design (schema) -**Impact:** HIGH -**Description:** Table design, index strategies, partitioning, and data type selection. Foundation for long-term performance. - -## 5. Concurrency & Locking (lock) -**Impact:** MEDIUM-HIGH -**Description:** Transaction management, isolation levels, deadlock prevention, and lock contention patterns. - -## 6. Data Access Patterns (data) -**Impact:** MEDIUM -**Description:** N+1 query elimination, batch operations, cursor-based pagination, and efficient data fetching. - -## 7. Monitoring & Diagnostics (monitor) -**Impact:** LOW-MEDIUM -**Description:** Using pg_stat_statements, EXPLAIN ANALYZE, metrics collection, and performance diagnostics. - -## 8. Advanced Features (advanced) -**Impact:** LOW -**Description:** Full-text search, JSONB optimization, PostGIS, extensions, and advanced Postgres features. diff --git a/.claude/skills/supabase-postgres-best-practices/references/_template.md b/.claude/skills/supabase-postgres-best-practices/references/_template.md deleted file mode 100644 index 91ace90e..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/_template.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Clear, Action-Oriented Title (e.g., "Use Partial Indexes for Filtered Queries") -impact: MEDIUM -impactDescription: 5-20x query speedup for filtered queries -tags: indexes, query-optimization, performance ---- - -## [Rule Title] - -[1-2 sentence explanation of the problem and why it matters. Focus on performance impact.] - -**Incorrect (describe the problem):** - -```sql --- Comment explaining what makes this slow/problematic -CREATE INDEX users_email_idx ON users(email); - -SELECT * FROM users WHERE email = 'user@example.com' AND deleted_at IS NULL; --- This scans deleted records unnecessarily -``` - -**Correct (describe the solution):** - -```sql --- Comment explaining why this is better -CREATE INDEX users_active_email_idx ON users(email) WHERE deleted_at IS NULL; - -SELECT * FROM users WHERE email = 'user@example.com' AND deleted_at IS NULL; --- Only indexes active users, 10x smaller index, faster queries -``` - -[Optional: Additional context, edge cases, or trade-offs] - -Reference: [Postgres Docs](https://www.postgresql.org/docs/current/) diff --git a/.claude/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md b/.claude/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md deleted file mode 100644 index 582cbeaa..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Use tsvector for Full-Text Search -impact: MEDIUM -impactDescription: 100x faster than LIKE, with ranking support -tags: full-text-search, tsvector, gin, search ---- - -## Use tsvector for Full-Text Search - -LIKE with wildcards can't use indexes. Full-text search with tsvector is orders of magnitude faster. 
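For raw end-user input, `websearch_to_tsquery` (Postgres 11+) is usually safer than hand-building `to_tsquery` strings, since it tolerates quoted phrases, `-exclusions`, and stray syntax without raising errors. A sketch assuming the `articles.search_vector` column set up in the correct example below:

```sql
-- Parses loose user input without tsquery syntax errors
select *
from articles
where search_vector @@ websearch_to_tsquery('english', '"query planner" postgres -mysql');
```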
- -**Incorrect (LIKE pattern matching):** - -```sql --- Cannot use index, scans all rows -select * from articles where content like '%postgresql%'; - --- Case-insensitive makes it worse -select * from articles where lower(content) like '%postgresql%'; -``` - -**Correct (full-text search with tsvector):** - -```sql --- Add tsvector column and index -alter table articles add column search_vector tsvector - generated always as (to_tsvector('english', coalesce(title,'') || ' ' || coalesce(content,''))) stored; - -create index articles_search_idx on articles using gin (search_vector); - --- Fast full-text search -select * from articles -where search_vector @@ to_tsquery('english', 'postgresql & performance'); - --- With ranking -select *, ts_rank(search_vector, query) as rank -from articles, to_tsquery('english', 'postgresql') query -where search_vector @@ query -order by rank desc; -``` - -Search multiple terms: - -```sql --- AND: both terms required -to_tsquery('postgresql & performance') - --- OR: either term -to_tsquery('postgresql | mysql') - --- Prefix matching -to_tsquery('post:*') -``` - -Reference: [Full Text Search](https://supabase.com/docs/guides/database/full-text-search) diff --git a/.claude/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md b/.claude/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md deleted file mode 100644 index e3d261ea..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Index JSONB Columns for Efficient Querying -impact: MEDIUM -impactDescription: 10-100x faster JSONB queries with proper indexing -tags: jsonb, gin, indexes, json ---- - -## Index JSONB Columns for Efficient Querying - -JSONB queries without indexes scan the entire table. Use GIN indexes for containment queries. 
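To confirm the planner actually uses the index for a containment query, check the plan. A quick sketch; `products`, `attributes`, and the `products_attrs_gin` index match the correct example below, and the plan shape depends on table size and statistics:

```sql
-- With the GIN index in place, expect a Bitmap Index Scan rather than a Seq Scan
explain (analyze, buffers)
select * from products where attributes @> '{"color": "red"}';
-- Look for: Bitmap Index Scan on products_attrs_gin
```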
- -**Incorrect (no index on JSONB):** - -```sql -create table products ( - id bigint primary key, - attributes jsonb -); - --- Full table scan for every query -select * from products where attributes @> '{"color": "red"}'; -select * from products where attributes->>'brand' = 'Nike'; -``` - -**Correct (GIN index for JSONB):** - -```sql --- GIN index for containment operators (@>, ?, ?&, ?|) -create index products_attrs_gin on products using gin (attributes); - --- Now containment queries use the index -select * from products where attributes @> '{"color": "red"}'; - --- For specific key lookups, use expression index -create index products_brand_idx on products ((attributes->>'brand')); -select * from products where attributes->>'brand' = 'Nike'; -``` - -Choose the right operator class: - -```sql --- jsonb_ops (default): supports all operators, larger index -create index idx1 on products using gin (attributes); - --- jsonb_path_ops: only @> operator, but 2-3x smaller index -create index idx2 on products using gin (attributes jsonb_path_ops); -``` - -Reference: [JSONB Indexes](https://www.postgresql.org/docs/current/datatype-json.html#JSON-INDEXING) diff --git a/.claude/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md b/.claude/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md deleted file mode 100644 index 40b9cc50..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Configure Idle Connection Timeouts -impact: HIGH -impactDescription: Reclaim 30-50% of connection slots from idle clients -tags: connections, timeout, idle, resource-management ---- - -## Configure Idle Connection Timeouts - -Idle connections waste resources. Configure timeouts to automatically reclaim them. - -**Incorrect (connections held indefinitely):** - -```sql --- No timeout configured -show idle_in_transaction_session_timeout; -- 0 (disabled) - --- Connections stay open forever, even when idle -select pid, state, state_change, query -from pg_stat_activity -where state = 'idle in transaction'; --- Shows transactions idle for hours, holding locks -``` - -**Correct (automatic cleanup of idle connections):** - -```sql --- Terminate connections idle in transaction after 30 seconds -alter system set idle_in_transaction_session_timeout = '30s'; - --- Terminate completely idle connections after 10 minutes -alter system set idle_session_timeout = '10min'; - --- Reload configuration -select pg_reload_conf(); -``` - -For pooled connections, configure at the pooler level: - -```ini -# pgbouncer.ini -server_idle_timeout = 60 -client_idle_timeout = 300 -``` - -Reference: [Connection Timeouts](https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT) diff --git a/.claude/skills/supabase-postgres-best-practices/references/conn-limits.md b/.claude/skills/supabase-postgres-best-practices/references/conn-limits.md deleted file mode 100644 index cb3e400c..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/conn-limits.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Set Appropriate Connection Limits -impact: CRITICAL -impactDescription: Prevent database crashes and memory exhaustion -tags: connections, max-connections, limits, stability ---- - -## Set Appropriate Connection Limits - -Too many connections exhaust memory and degrade performance. Set limits based on available resources. 
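To see how close you are to the configured limit and which clients hold the connections, a monitoring sketch using standard `pg_stat_activity` columns:

```sql
-- Current usage vs the configured limit
select count(*) as in_use,
       current_setting('max_connections')::int as max_allowed
from pg_stat_activity;

-- Break down by application and state to find the biggest consumers
select application_name, state, count(*)
from pg_stat_activity
group by application_name, state
order by count(*) desc;
```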
- -**Incorrect (unlimited or excessive connections):** - -```sql --- Default max_connections = 100, but often increased blindly -show max_connections; -- 500 (way too high for 4GB RAM) - --- Each connection uses 1-3MB RAM --- 500 connections * 2MB = 1GB just for connections! --- Out of memory errors under load -``` - -**Correct (calculate based on resources):** - -```sql --- Formula: max_connections = (RAM in MB / 5MB per connection) - reserved --- For 4GB RAM: (4096 / 5) - 10 = ~800 theoretical max --- But practically, 100-200 is better for query performance - --- Recommended settings for 4GB RAM -alter system set max_connections = 100; - --- Also set work_mem appropriately --- work_mem * max_connections should not exceed 25% of RAM -alter system set work_mem = '8MB'; -- 8MB * 100 = 800MB max -``` - -Monitor connection usage: - -```sql -select count(*), state from pg_stat_activity group by state; -``` - -Reference: [Database Connections](https://supabase.com/docs/guides/platform/performance#connection-management) diff --git a/.claude/skills/supabase-postgres-best-practices/references/conn-pooling.md b/.claude/skills/supabase-postgres-best-practices/references/conn-pooling.md deleted file mode 100644 index e2ebd581..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/conn-pooling.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Use Connection Pooling for All Applications -impact: CRITICAL -impactDescription: Handle 10-100x more concurrent users -tags: connection-pooling, pgbouncer, performance, scalability ---- - -## Use Connection Pooling for All Applications - -Postgres connections are expensive (1-3MB RAM each). Without pooling, applications exhaust connections under load. - -**Incorrect (new connection per request):** - -```sql --- Each request creates a new connection --- Application code: db.connect() per request --- Result: 500 concurrent users = 500 connections = crashed database - --- Check current connections -select count(*) from pg_stat_activity; -- 487 connections! -``` - -**Correct (connection pooling):** - -```sql --- Use a pooler like PgBouncer between app and database --- Application connects to pooler, pooler reuses a small pool to Postgres - --- Configure pool_size based on: (CPU cores * 2) + spindle_count --- Example for 4 cores: pool_size = 10 - --- Result: 500 concurrent users share 10 actual connections -select count(*) from pg_stat_activity; -- 10 connections -``` - -Pool modes: - -- **Transaction mode**: connection returned after each transaction (best for most apps) -- **Session mode**: connection held for entire session (needed for prepared statements, temp tables) - -Reference: [Connection Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pooler) diff --git a/.claude/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md b/.claude/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md deleted file mode 100644 index 555547d8..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Use Prepared Statements Correctly with Pooling -impact: HIGH -impactDescription: Avoid prepared statement conflicts in pooled environments -tags: prepared-statements, connection-pooling, transaction-mode ---- - -## Use Prepared Statements Correctly with Pooling - -Prepared statements are tied to individual database connections. 
In transaction-mode pooling, connections are shared, causing conflicts. - -**Incorrect (named prepared statements with transaction pooling):** - -```sql --- Named prepared statement -prepare get_user as select * from users where id = $1; - --- In transaction mode pooling, next request may get different connection -execute get_user(123); --- ERROR: prepared statement "get_user" does not exist -``` - -**Correct (use unnamed statements or session mode):** - -```sql --- Option 1: Use unnamed prepared statements (most ORMs do this automatically) --- The query is prepared and executed in a single protocol message - --- Option 2: Deallocate after use in transaction mode -prepare get_user as select * from users where id = $1; -execute get_user(123); -deallocate get_user; - --- Option 3: Use session mode pooling (port 5432 vs 6543) --- Connection is held for entire session, prepared statements persist -``` - -Check your driver settings: - -```sql --- Many drivers use prepared statements by default --- Node.js pg: { prepare: false } to disable --- JDBC: prepareThreshold=0 to disable -``` - -Reference: [Prepared Statements with Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pool-modes) diff --git a/.claude/skills/supabase-postgres-best-practices/references/data-batch-inserts.md b/.claude/skills/supabase-postgres-best-practices/references/data-batch-inserts.md deleted file mode 100644 index 997947cb..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/data-batch-inserts.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Batch INSERT Statements for Bulk Data -impact: MEDIUM -impactDescription: 10-50x faster bulk inserts -tags: batch, insert, bulk, performance, copy ---- - -## Batch INSERT Statements for Bulk Data - -Individual INSERT statements have high overhead. Batch multiple rows in single statements or use COPY. - -**Incorrect (individual inserts):** - -```sql --- Each insert is a separate transaction and round trip -insert into events (user_id, action) values (1, 'click'); -insert into events (user_id, action) values (1, 'view'); -insert into events (user_id, action) values (2, 'click'); --- ... 1000 more individual inserts - --- 1000 inserts = 1000 round trips = slow -``` - -**Correct (batch insert):** - -```sql --- Multiple rows in single statement -insert into events (user_id, action) values - (1, 'click'), - (1, 'view'), - (2, 'click'), - -- ... up to ~1000 rows per batch - (999, 'view'); - --- One round trip for 1000 rows -``` - -For large imports, use COPY: - -```sql --- COPY is fastest for bulk loading -copy events (user_id, action, created_at) -from '/path/to/data.csv' -with (format csv, header true); - --- Or from stdin in application -copy events (user_id, action) from stdin with (format csv); -1,click -1,view -2,click -\. -``` - -Reference: [COPY](https://www.postgresql.org/docs/current/sql-copy.html) diff --git a/.claude/skills/supabase-postgres-best-practices/references/data-n-plus-one.md b/.claude/skills/supabase-postgres-best-practices/references/data-n-plus-one.md deleted file mode 100644 index 2109186f..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/data-n-plus-one.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Eliminate N+1 Queries with Batch Loading -impact: MEDIUM-HIGH -impactDescription: 10-100x fewer database round trips -tags: n-plus-one, batch, performance, queries ---- - -## Eliminate N+1 Queries with Batch Loading - -N+1 queries execute one query per item in a loop. 
Batch them into a single query using arrays or JOINs. - -**Incorrect (N+1 queries):** - -```sql --- First query: get all users -select id from users where active = true; -- Returns 100 IDs - --- Then N queries, one per user -select * from orders where user_id = 1; -select * from orders where user_id = 2; -select * from orders where user_id = 3; --- ... 97 more queries! - --- Total: 101 round trips to database -``` - -**Correct (single batch query):** - -```sql --- Collect IDs and query once with ANY -select * from orders where user_id = any(array[1, 2, 3, ...]); - --- Or use JOIN instead of loop -select u.id, u.name, o.* -from users u -left join orders o on o.user_id = u.id -where u.active = true; - --- Total: 1 round trip -``` - -Application pattern: - -```sql --- Instead of looping in application code: --- for user in users: db.query("SELECT * FROM orders WHERE user_id = $1", user.id) - --- Pass array parameter: -select * from orders where user_id = any($1::bigint[]); --- Application passes: [1, 2, 3, 4, 5, ...] -``` - -Reference: [N+1 Query Problem](https://supabase.com/docs/guides/database/query-optimization) diff --git a/.claude/skills/supabase-postgres-best-practices/references/data-pagination.md b/.claude/skills/supabase-postgres-best-practices/references/data-pagination.md deleted file mode 100644 index 633d8393..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/data-pagination.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Use Cursor-Based Pagination Instead of OFFSET -impact: MEDIUM-HIGH -impactDescription: Consistent O(1) performance regardless of page depth -tags: pagination, cursor, keyset, offset, performance ---- - -## Use Cursor-Based Pagination Instead of OFFSET - -OFFSET-based pagination scans all skipped rows, getting slower on deeper pages. Cursor pagination is O(1). - -**Incorrect (OFFSET pagination):** - -```sql --- Page 1: scans 20 rows -select * from products order by id limit 20 offset 0; - --- Page 100: scans 2000 rows to skip 1980 -select * from products order by id limit 20 offset 1980; - --- Page 10000: scans 200,000 rows! -select * from products order by id limit 20 offset 199980; -``` - -**Correct (cursor/keyset pagination):** - -```sql --- Page 1: get first 20 -select * from products order by id limit 20; --- Application stores last_id = 20 - --- Page 2: start after last ID -select * from products where id > 20 order by id limit 20; --- Uses index, always fast regardless of page depth - --- Page 10000: same speed as page 1 -select * from products where id > 199980 order by id limit 20; -``` - -For multi-column sorting: - -```sql --- Cursor must include all sort columns -select * from products -where (created_at, id) > ('2024-01-15 10:00:00', 12345) -order by created_at, id -limit 20; -``` - -Reference: [Pagination](https://supabase.com/docs/guides/database/pagination) diff --git a/.claude/skills/supabase-postgres-best-practices/references/data-upsert.md b/.claude/skills/supabase-postgres-best-practices/references/data-upsert.md deleted file mode 100644 index bc95e230..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/data-upsert.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Use UPSERT for Insert-or-Update Operations -impact: MEDIUM -impactDescription: Atomic operation, eliminates race conditions -tags: upsert, on-conflict, insert, update ---- - -## Use UPSERT for Insert-or-Update Operations - -Using separate SELECT-then-INSERT/UPDATE creates race conditions. Use INSERT ... ON CONFLICT for atomic upserts. 
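ON CONFLICT also handles bulk writes, so an entire batch can be upserted in one statement. A sketch assuming the same illustrative `settings` table and unique constraint on `(user_id, key)` as the examples below:

```sql
-- Upsert many rows at once; excluded refers to each incoming row
insert into settings (user_id, key, value)
values
  (123, 'theme', 'dark'),
  (123, 'locale', 'en'),
  (456, 'theme', 'light')
on conflict (user_id, key)
do update set value = excluded.value, updated_at = now();
```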
- -**Incorrect (check-then-insert race condition):** - -```sql --- Race condition: two requests check simultaneously -select * from settings where user_id = 123 and key = 'theme'; --- Both find nothing - --- Both try to insert -insert into settings (user_id, key, value) values (123, 'theme', 'dark'); --- One succeeds, one fails with duplicate key error! -``` - -**Correct (atomic UPSERT):** - -```sql --- Single atomic operation -insert into settings (user_id, key, value) -values (123, 'theme', 'dark') -on conflict (user_id, key) -do update set value = excluded.value, updated_at = now(); - --- Returns the inserted/updated row -insert into settings (user_id, key, value) -values (123, 'theme', 'dark') -on conflict (user_id, key) -do update set value = excluded.value -returning *; -``` - -Insert-or-ignore pattern: - -```sql --- Insert only if not exists (no update) -insert into page_views (page_id, user_id) -values (1, 123) -on conflict (page_id, user_id) do nothing; -``` - -Reference: [INSERT ON CONFLICT](https://www.postgresql.org/docs/current/sql-insert.html#SQL-ON-CONFLICT) diff --git a/.claude/skills/supabase-postgres-best-practices/references/lock-advisory.md b/.claude/skills/supabase-postgres-best-practices/references/lock-advisory.md deleted file mode 100644 index 572eaf0d..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/lock-advisory.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Use Advisory Locks for Application-Level Locking -impact: MEDIUM -impactDescription: Efficient coordination without row-level lock overhead -tags: advisory-locks, coordination, application-locks ---- - -## Use Advisory Locks for Application-Level Locking - -Advisory locks provide application-level coordination without requiring database rows to lock. - -**Incorrect (creating rows just for locking):** - -```sql --- Creating dummy rows to lock on -create table resource_locks ( - resource_name text primary key -); - -insert into resource_locks values ('report_generator'); - --- Lock by selecting the row -select * from resource_locks where resource_name = 'report_generator' for update; -``` - -**Correct (advisory locks):** - -```sql --- Session-level advisory lock (released on disconnect or unlock) -select pg_advisory_lock(hashtext('report_generator')); --- ... do exclusive work ... -select pg_advisory_unlock(hashtext('report_generator')); - --- Transaction-level lock (released on commit/rollback) -begin; -select pg_advisory_xact_lock(hashtext('daily_report')); --- ... do work ... 
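-- Note: there is no unlock function for transaction-level advisory locks;
-- the lock is released only at commit or rollback, so keep this transaction short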
-commit; -- Lock automatically released -``` - -Try-lock for non-blocking operations: - -```sql --- Returns immediately with true/false instead of waiting -select pg_try_advisory_lock(hashtext('resource_name')); - --- Use in application -if (acquired) { - -- Do work - select pg_advisory_unlock(hashtext('resource_name')); -} else { - -- Skip or retry later -} -``` - -Reference: [Advisory Locks](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS) diff --git a/.claude/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md b/.claude/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md deleted file mode 100644 index 974da5ed..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Prevent Deadlocks with Consistent Lock Ordering -impact: MEDIUM-HIGH -impactDescription: Eliminate deadlock errors, improve reliability -tags: deadlocks, locking, transactions, ordering ---- - -## Prevent Deadlocks with Consistent Lock Ordering - -Deadlocks occur when transactions lock resources in different orders. Always -acquire locks in a consistent order. - -**Incorrect (inconsistent lock ordering):** - -```sql --- Transaction A -- Transaction B -begin; begin; -update accounts update accounts -set balance = balance - 100 set balance = balance - 50 -where id = 1; where id = 2; -- B locks row 2 - -update accounts update accounts -set balance = balance + 100 set balance = balance + 50 -where id = 2; -- A waits for B where id = 1; -- B waits for A - --- DEADLOCK! Both waiting for each other -``` - -**Correct (lock rows in consistent order first):** - -```sql --- Explicitly acquire locks in ID order before updating -begin; -select * from accounts where id in (1, 2) order by id for update; - --- Now perform updates in any order - locks already held -update accounts set balance = balance - 100 where id = 1; -update accounts set balance = balance + 100 where id = 2; -commit; -``` - -Alternative: use a single statement to update atomically: - -```sql --- Single statement acquires all locks atomically -begin; -update accounts -set balance = balance + case id - when 1 then -100 - when 2 then 100 -end -where id in (1, 2); -commit; -``` - -Detect deadlocks in logs: - -```sql --- Check for recent deadlocks -select * from pg_stat_database where deadlocks > 0; - --- Enable deadlock logging -set log_lock_waits = on; -set deadlock_timeout = '1s'; -``` - -Reference: -[Deadlocks](https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-DEADLOCKS) diff --git a/.claude/skills/supabase-postgres-best-practices/references/lock-short-transactions.md b/.claude/skills/supabase-postgres-best-practices/references/lock-short-transactions.md deleted file mode 100644 index e6b8ef26..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/lock-short-transactions.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Keep Transactions Short to Reduce Lock Contention -impact: MEDIUM-HIGH -impactDescription: 3-5x throughput improvement, fewer deadlocks -tags: transactions, locking, contention, performance ---- - -## Keep Transactions Short to Reduce Lock Contention - -Long-running transactions hold locks that block other queries. Keep transactions as short as possible. 
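To find transactions that have already been open too long (and are likely holding locks), a diagnostic sketch against `pg_stat_activity`:

```sql
-- Transactions open for more than one minute, oldest first
select pid, state, now() - xact_start as xact_age, left(query, 60) as current_query
from pg_stat_activity
where xact_start is not null
  and now() - xact_start > interval '1 minute'
order by xact_start;
```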
- -**Incorrect (long transaction with external calls):** - -```sql -begin; -select * from orders where id = 1 for update; -- Lock acquired - --- Application makes HTTP call to payment API (2-5 seconds) --- Other queries on this row are blocked! - -update orders set status = 'paid' where id = 1; -commit; -- Lock held for entire duration -``` - -**Correct (minimal transaction scope):** - -```sql --- Validate data and call APIs outside transaction --- Application: response = await paymentAPI.charge(...) - --- Only hold lock for the actual update -begin; -update orders -set status = 'paid', payment_id = $1 -where id = $2 and status = 'pending' -returning *; -commit; -- Lock held for milliseconds -``` - -Use `statement_timeout` to prevent runaway transactions: - -```sql --- Abort queries running longer than 30 seconds -set statement_timeout = '30s'; - --- Or per-session -set local statement_timeout = '5s'; -``` - -Reference: [Transaction Management](https://www.postgresql.org/docs/current/tutorial-transactions.html) diff --git a/.claude/skills/supabase-postgres-best-practices/references/lock-skip-locked.md b/.claude/skills/supabase-postgres-best-practices/references/lock-skip-locked.md deleted file mode 100644 index 77bdbb97..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/lock-skip-locked.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Use SKIP LOCKED for Non-Blocking Queue Processing -impact: MEDIUM-HIGH -impactDescription: 10x throughput for worker queues -tags: skip-locked, queue, workers, concurrency ---- - -## Use SKIP LOCKED for Non-Blocking Queue Processing - -When multiple workers process a queue, SKIP LOCKED allows workers to process different rows without waiting. - -**Incorrect (workers block each other):** - -```sql --- Worker 1 and Worker 2 both try to get next job -begin; -select * from jobs where status = 'pending' order by created_at limit 1 for update; --- Worker 2 waits for Worker 1's lock to release! -``` - -**Correct (SKIP LOCKED for parallel processing):** - -```sql --- Each worker skips locked rows and gets the next available -begin; -select * from jobs -where status = 'pending' -order by created_at -limit 1 -for update skip locked; - --- Worker 1 gets job 1, Worker 2 gets job 2 (no waiting) - -update jobs set status = 'processing' where id = $1; -commit; -``` - -Complete queue pattern: - -```sql --- Atomic claim-and-update in one statement -update jobs -set status = 'processing', worker_id = $1, started_at = now() -where id = ( - select id from jobs - where status = 'pending' - order by created_at - limit 1 - for update skip locked -) -returning *; -``` - -Reference: [SELECT FOR UPDATE SKIP LOCKED](https://www.postgresql.org/docs/current/sql-select.html#SQL-FOR-UPDATE-SHARE) diff --git a/.claude/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md b/.claude/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md deleted file mode 100644 index 542978c3..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Use EXPLAIN ANALYZE to Diagnose Slow Queries -impact: LOW-MEDIUM -impactDescription: Identify exact bottlenecks in query execution -tags: explain, analyze, diagnostics, query-plan ---- - -## Use EXPLAIN ANALYZE to Diagnose Slow Queries - -EXPLAIN ANALYZE executes the query and shows actual timings, revealing the true performance bottlenecks. 
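Because EXPLAIN ANALYZE really executes the statement, wrap writes in a transaction you roll back. A small sketch; `orders` is illustrative:

```sql
-- Safe plan inspection for a data-modifying statement
begin;
explain (analyze, buffers)
delete from orders where status = 'cancelled';
rollback; -- nothing was actually deleted
```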
- -**Incorrect (guessing at performance issues):** - -```sql --- Query is slow, but why? -select * from orders where customer_id = 123 and status = 'pending'; --- "It must be missing an index" - but which one? -``` - -**Correct (use EXPLAIN ANALYZE):** - -```sql -explain (analyze, buffers, format text) -select * from orders where customer_id = 123 and status = 'pending'; - --- Output reveals the issue: --- Seq Scan on orders (cost=0.00..25000.00 rows=50 width=100) (actual time=0.015..450.123 rows=50 loops=1) --- Filter: ((customer_id = 123) AND (status = 'pending'::text)) --- Rows Removed by Filter: 999950 --- Buffers: shared hit=5000 read=15000 --- Planning Time: 0.150 ms --- Execution Time: 450.500 ms -``` - -Key things to look for: - -```sql --- Seq Scan on large tables = missing index --- Rows Removed by Filter = poor selectivity or missing index --- Buffers: read >> hit = data not cached, needs more memory --- Nested Loop with high loops = consider different join strategy --- Sort Method: external merge = work_mem too low -``` - -Reference: [EXPLAIN](https://supabase.com/docs/guides/database/inspect) diff --git a/.claude/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md b/.claude/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md deleted file mode 100644 index d7e82f1a..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Enable pg_stat_statements for Query Analysis -impact: LOW-MEDIUM -impactDescription: Identify top resource-consuming queries -tags: pg-stat-statements, monitoring, statistics, performance ---- - -## Enable pg_stat_statements for Query Analysis - -pg_stat_statements tracks execution statistics for all queries, helping identify slow and frequent queries. - -**Incorrect (no visibility into query patterns):** - -```sql --- Database is slow, but which queries are the problem? 
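-- pg_stat_activity only shows current sessions and their latest statement,
-- so short, frequent queries that finish quickly never appear there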
--- No way to know without pg_stat_statements -``` - -**Correct (enable and query pg_stat_statements):** - -```sql --- Enable the extension -create extension if not exists pg_stat_statements; - --- Find slowest queries by total time -select - calls, - round(total_exec_time::numeric, 2) as total_time_ms, - round(mean_exec_time::numeric, 2) as mean_time_ms, - query -from pg_stat_statements -order by total_exec_time desc -limit 10; - --- Find most frequent queries -select calls, query -from pg_stat_statements -order by calls desc -limit 10; - --- Reset statistics after optimization -select pg_stat_statements_reset(); -``` - -Key metrics to monitor: - -```sql --- Queries with high mean time (candidates for optimization) -select query, mean_exec_time, calls -from pg_stat_statements -where mean_exec_time > 100 -- > 100ms average -order by mean_exec_time desc; -``` - -Reference: [pg_stat_statements](https://supabase.com/docs/guides/database/extensions/pg_stat_statements) diff --git a/.claude/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md b/.claude/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md deleted file mode 100644 index e0e8ea0b..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Maintain Table Statistics with VACUUM and ANALYZE -impact: MEDIUM -impactDescription: 2-10x better query plans with accurate statistics -tags: vacuum, analyze, statistics, maintenance, autovacuum ---- - -## Maintain Table Statistics with VACUUM and ANALYZE - -Outdated statistics cause the query planner to make poor decisions. VACUUM reclaims space, ANALYZE updates statistics. - -**Incorrect (stale statistics):** - -```sql --- Table has 1M rows but stats say 1000 --- Query planner chooses wrong strategy -explain select * from orders where status = 'pending'; --- Shows: Seq Scan (because stats show small table) --- Actually: Index Scan would be much faster -``` - -**Correct (maintain fresh statistics):** - -```sql --- Manually analyze after large data changes -analyze orders; - --- Analyze specific columns used in WHERE clauses -analyze orders (status, created_at); - --- Check when tables were last analyzed -select - relname, - last_vacuum, - last_autovacuum, - last_analyze, - last_autoanalyze -from pg_stat_user_tables -order by last_analyze nulls first; -``` - -Autovacuum tuning for busy tables: - -```sql --- Increase frequency for high-churn tables -alter table orders set ( - autovacuum_vacuum_scale_factor = 0.05, -- Vacuum at 5% dead tuples (default 20%) - autovacuum_analyze_scale_factor = 0.02 -- Analyze at 2% changes (default 10%) -); - --- Check autovacuum status -select * from pg_stat_progress_vacuum; -``` - -Reference: [VACUUM](https://supabase.com/docs/guides/database/database-size#vacuum-operations) diff --git a/.claude/skills/supabase-postgres-best-practices/references/query-composite-indexes.md b/.claude/skills/supabase-postgres-best-practices/references/query-composite-indexes.md deleted file mode 100644 index fea64523..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/query-composite-indexes.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Create Composite Indexes for Multi-Column Queries -impact: HIGH -impactDescription: 5-10x faster multi-column queries -tags: indexes, composite-index, multi-column, query-optimization ---- - -## Create Composite Indexes for Multi-Column Queries - -When queries filter on multiple columns, a 
composite index is more efficient than separate single-column indexes. - -**Incorrect (separate indexes require bitmap scan):** - -```sql --- Two separate indexes -create index orders_status_idx on orders (status); -create index orders_created_idx on orders (created_at); - --- Query must combine both indexes (slower) -select * from orders where status = 'pending' and created_at > '2024-01-01'; -``` - -**Correct (composite index):** - -```sql --- Single composite index (leftmost column first for equality checks) -create index orders_status_created_idx on orders (status, created_at); - --- Query uses one efficient index scan -select * from orders where status = 'pending' and created_at > '2024-01-01'; -``` - -**Column order matters** - place equality columns first, range columns last: - -```sql --- Good: status (=) before created_at (>) -create index idx on orders (status, created_at); - --- Works for: WHERE status = 'pending' --- Works for: WHERE status = 'pending' AND created_at > '2024-01-01' --- Does NOT work for: WHERE created_at > '2024-01-01' (leftmost prefix rule) -``` - -Reference: [Multicolumn Indexes](https://www.postgresql.org/docs/current/indexes-multicolumn.html) diff --git a/.claude/skills/supabase-postgres-best-practices/references/query-covering-indexes.md b/.claude/skills/supabase-postgres-best-practices/references/query-covering-indexes.md deleted file mode 100644 index 9d2a4947..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/query-covering-indexes.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Use Covering Indexes to Avoid Table Lookups -impact: MEDIUM-HIGH -impactDescription: 2-5x faster queries by eliminating heap fetches -tags: indexes, covering-index, include, index-only-scan ---- - -## Use Covering Indexes to Avoid Table Lookups - -Covering indexes include all columns needed by a query, enabling index-only scans that skip the table entirely. - -**Incorrect (index scan + heap fetch):** - -```sql -create index users_email_idx on users (email); - --- Must fetch name and created_at from table heap -select email, name, created_at from users where email = 'user@example.com'; -``` - -**Correct (index-only scan with INCLUDE):** - -```sql --- Include non-searchable columns in the index -create index users_email_idx on users (email) include (name, created_at); - --- All columns served from index, no table access needed -select email, name, created_at from users where email = 'user@example.com'; -``` - -Use INCLUDE for columns you SELECT but don't filter on: - -```sql --- Searching by status, but also need customer_id and total -create index orders_status_idx on orders (status) include (customer_id, total); - -select status, customer_id, total from orders where status = 'shipped'; -``` - -Reference: [Index-Only Scans](https://www.postgresql.org/docs/current/indexes-index-only-scans.html) diff --git a/.claude/skills/supabase-postgres-best-practices/references/query-index-types.md b/.claude/skills/supabase-postgres-best-practices/references/query-index-types.md deleted file mode 100644 index 93b32590..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/query-index-types.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Choose the Right Index Type for Your Data -impact: HIGH -impactDescription: 10-100x improvement with correct index type -tags: indexes, btree, gin, gist, brin, hash, index-types ---- - -## Choose the Right Index Type for Your Data - -Different index types excel at different query patterns. 
The default B-tree isn't always optimal. - -**Incorrect (B-tree for JSONB containment):** - -```sql --- B-tree cannot optimize containment operators -create index products_attrs_idx on products (attributes); -select * from products where attributes @> '{"color": "red"}'; --- Full table scan - B-tree doesn't support @> operator -``` - -**Correct (GIN for JSONB):** - -```sql --- GIN supports @>, ?, ?&, ?| operators -create index products_attrs_idx on products using gin (attributes); -select * from products where attributes @> '{"color": "red"}'; -``` - -Index type guide: - -```sql --- B-tree (default): =, <, >, BETWEEN, IN, IS NULL -create index users_created_idx on users (created_at); - --- GIN: arrays, JSONB, full-text search -create index posts_tags_idx on posts using gin (tags); - --- GiST: geometric data, range types, nearest-neighbor (KNN) queries -create index locations_idx on places using gist (location); - --- BRIN: large time-series tables (10-100x smaller) -create index events_time_idx on events using brin (created_at); - --- Hash: equality-only (slightly faster than B-tree for =) -create index sessions_token_idx on sessions using hash (token); -``` - -Reference: [Index Types](https://www.postgresql.org/docs/current/indexes-types.html) diff --git a/.claude/skills/supabase-postgres-best-practices/references/query-missing-indexes.md b/.claude/skills/supabase-postgres-best-practices/references/query-missing-indexes.md deleted file mode 100644 index e6daace7..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/query-missing-indexes.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Add Indexes on WHERE and JOIN Columns -impact: CRITICAL -impactDescription: 100-1000x faster queries on large tables -tags: indexes, performance, sequential-scan, query-optimization ---- - -## Add Indexes on WHERE and JOIN Columns - -Queries filtering or joining on unindexed columns cause full table scans, which become exponentially slower as tables grow. 
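To find tables that are being sequentially scanned in production, a monitoring sketch using standard `pg_stat_user_tables` columns:

```sql
-- Tables with many sequential scans and many rows read by those scans
select relname, seq_scan, seq_tup_read, idx_scan
from pg_stat_user_tables
where seq_scan > 0
order by seq_tup_read desc
limit 10;
```

High `seq_scan` with low `idx_scan` on a large table is a strong hint that a WHERE or JOIN column needs an index.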
- -**Incorrect (sequential scan on large table):** - -```sql --- No index on customer_id causes full table scan -select * from orders where customer_id = 123; - --- EXPLAIN shows: Seq Scan on orders (cost=0.00..25000.00 rows=100 width=85) -``` - -**Correct (index scan):** - -```sql --- Create index on frequently filtered column -create index orders_customer_id_idx on orders (customer_id); - -select * from orders where customer_id = 123; - --- EXPLAIN shows: Index Scan using orders_customer_id_idx (cost=0.42..8.44 rows=100 width=85) -``` - -For JOIN columns, always index the foreign key side: - -```sql --- Index the referencing column -create index orders_customer_id_idx on orders (customer_id); - -select c.name, o.total -from customers c -join orders o on o.customer_id = c.id; -``` - -Reference: [Query Optimization](https://supabase.com/docs/guides/database/query-optimization) diff --git a/.claude/skills/supabase-postgres-best-practices/references/query-partial-indexes.md b/.claude/skills/supabase-postgres-best-practices/references/query-partial-indexes.md deleted file mode 100644 index 3e61a341..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/query-partial-indexes.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Use Partial Indexes for Filtered Queries -impact: HIGH -impactDescription: 5-20x smaller indexes, faster writes and queries -tags: indexes, partial-index, query-optimization, storage ---- - -## Use Partial Indexes for Filtered Queries - -Partial indexes only include rows matching a WHERE condition, making them smaller and faster when queries consistently filter on the same condition. - -**Incorrect (full index includes irrelevant rows):** - -```sql --- Index includes all rows, even soft-deleted ones -create index users_email_idx on users (email); - --- Query always filters active users -select * from users where email = 'user@example.com' and deleted_at is null; -``` - -**Correct (partial index matches query filter):** - -```sql --- Index only includes active users -create index users_active_email_idx on users (email) -where deleted_at is null; - --- Query uses the smaller, faster index -select * from users where email = 'user@example.com' and deleted_at is null; -``` - -Common use cases for partial indexes: - -```sql --- Only pending orders (status rarely changes once completed) -create index orders_pending_idx on orders (created_at) -where status = 'pending'; - --- Only non-null values -create index products_sku_idx on products (sku) -where sku is not null; -``` - -Reference: [Partial Indexes](https://www.postgresql.org/docs/current/indexes-partial.html) diff --git a/.claude/skills/supabase-postgres-best-practices/references/schema-constraints.md b/.claude/skills/supabase-postgres-best-practices/references/schema-constraints.md deleted file mode 100644 index 1d2ef8f9..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/schema-constraints.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Add Constraints Safely in Migrations -impact: HIGH -impactDescription: Prevents migration failures and enables idempotent schema changes -tags: constraints, migrations, schema, alter-table ---- - -## Add Constraints Safely in Migrations - -PostgreSQL does not support `ADD CONSTRAINT IF NOT EXISTS`. Migrations using this syntax will fail. 
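`DROP CONSTRAINT IF EXISTS` is supported, so for constraints that are cheap to rebuild a drop-then-add pair is also idempotent. A sketch reusing the illustrative constraint below; note it rebuilds the constraint and takes a lock, so prefer the DO-block check for large or heavily referenced tables:

```sql
alter table public.profiles
  drop constraint if exists profiles_birthchart_id_unique;

alter table public.profiles
  add constraint profiles_birthchart_id_unique unique (birthchart_id);
```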
- -**Incorrect (causes syntax error):** - -```sql --- ERROR: syntax error at or near "not" (SQLSTATE 42601) -alter table public.profiles -add constraint if not exists profiles_birthchart_id_unique unique (birthchart_id); -``` - -**Correct (idempotent constraint creation):** - -```sql --- Use DO block to check before adding -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'profiles_birthchart_id_unique' - and conrelid = 'public.profiles'::regclass - ) then - alter table public.profiles - add constraint profiles_birthchart_id_unique unique (birthchart_id); - end if; -end $$; -``` - -For all constraint types: - -```sql --- Check constraints -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'check_age_positive' - ) then - alter table users add constraint check_age_positive check (age > 0); - end if; -end $$; - --- Foreign keys -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'profiles_birthchart_id_fkey' - ) then - alter table profiles - add constraint profiles_birthchart_id_fkey - foreign key (birthchart_id) references birthcharts(id); - end if; -end $$; -``` - -Check if constraint exists: - -```sql --- Query to check constraint existence -select conname, contype, pg_get_constraintdef(oid) -from pg_constraint -where conrelid = 'public.profiles'::regclass; - --- contype values: --- 'p' = PRIMARY KEY --- 'f' = FOREIGN KEY --- 'u' = UNIQUE --- 'c' = CHECK -``` - -Reference: [Constraints](https://www.postgresql.org/docs/current/ddl-constraints.html) diff --git a/.claude/skills/supabase-postgres-best-practices/references/schema-data-types.md b/.claude/skills/supabase-postgres-best-practices/references/schema-data-types.md deleted file mode 100644 index f253a581..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/schema-data-types.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Choose Appropriate Data Types -impact: HIGH -impactDescription: 50% storage reduction, faster comparisons -tags: data-types, schema, storage, performance ---- - -## Choose Appropriate Data Types - -Using the right data types reduces storage, improves query performance, and prevents bugs. 
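The storage difference is easy to measure with `pg_column_size`. A quick check; byte counts for the text casts are approximate:

```sql
-- Per-value storage for a boolean vs the string 'true',
-- and a timestamptz vs its text representation
select pg_column_size(true)          as bool_bytes,        -- 1
       pg_column_size('true'::text)  as text_bool_bytes,   -- 5
       pg_column_size(now())         as timestamptz_bytes, -- 8
       pg_column_size(now()::text)   as text_ts_bytes;     -- ~30
```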
- -**Incorrect (wrong data types):** - -```sql -create table users ( - id int, -- Will overflow at 2.1 billion - email varchar(255), -- Unnecessary length limit - created_at timestamp, -- Missing timezone info - is_active varchar(5), -- String for boolean - price varchar(20) -- String for numeric -); -``` - -**Correct (appropriate data types):** - -```sql -create table users ( - id bigint generated always as identity primary key, -- 9 quintillion max - email text, -- No artificial limit, same performance as varchar - created_at timestamptz, -- Always store timezone-aware timestamps - is_active boolean default true, -- 1 byte vs variable string length - price numeric(10,2) -- Exact decimal arithmetic -); -``` - -Key guidelines: - -```sql --- IDs: use bigint, not int (future-proofing) --- Strings: use text, not varchar(n) unless constraint needed --- Time: use timestamptz, not timestamp --- Money: use numeric, not float (precision matters) --- Enums: use text with check constraint or create enum type -``` - -Reference: [Data Types](https://www.postgresql.org/docs/current/datatype.html) diff --git a/.claude/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md b/.claude/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md deleted file mode 100644 index 6c3d6ff6..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Index Foreign Key Columns -impact: HIGH -impactDescription: 10-100x faster JOINs and CASCADE operations -tags: foreign-key, indexes, joins, schema ---- - -## Index Foreign Key Columns - -Postgres does not automatically index foreign key columns. Missing indexes cause slow JOINs and CASCADE operations. - -**Incorrect (unindexed foreign key):** - -```sql -create table orders ( - id bigint generated always as identity primary key, - customer_id bigint references customers(id) on delete cascade, - total numeric(10,2) -); - --- No index on customer_id! 
--- JOINs and ON DELETE CASCADE both require full table scan -select * from orders where customer_id = 123; -- Seq Scan -delete from customers where id = 123; -- Locks table, scans all orders -``` - -**Correct (indexed foreign key):** - -```sql -create table orders ( - id bigint generated always as identity primary key, - customer_id bigint references customers(id) on delete cascade, - total numeric(10,2) -); - --- Always index the FK column -create index orders_customer_id_idx on orders (customer_id); - --- Now JOINs and cascades are fast -select * from orders where customer_id = 123; -- Index Scan -delete from customers where id = 123; -- Uses index, fast cascade -``` - -Find missing FK indexes: - -```sql -select - conrelid::regclass as table_name, - a.attname as fk_column -from pg_constraint c -join pg_attribute a on a.attrelid = c.conrelid and a.attnum = any(c.conkey) -where c.contype = 'f' - and not exists ( - select 1 from pg_index i - where i.indrelid = c.conrelid and a.attnum = any(i.indkey) - ); -``` - -Reference: [Foreign Keys](https://www.postgresql.org/docs/current/ddl-constraints.html#DDL-CONSTRAINTS-FK) diff --git a/.claude/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md b/.claude/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md deleted file mode 100644 index f0072940..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Use Lowercase Identifiers for Compatibility -impact: MEDIUM -impactDescription: Avoid case-sensitivity bugs with tools, ORMs, and AI assistants -tags: naming, identifiers, case-sensitivity, schema, conventions ---- - -## Use Lowercase Identifiers for Compatibility - -PostgreSQL folds unquoted identifiers to lowercase. Quoted mixed-case identifiers require quotes forever and cause issues with tools, ORMs, and AI assistants that may not recognize them. 
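To find mixed-case identifiers already present in a schema, a diagnostic sketch against the system catalogs (scoped to ordinary tables in `public`):

```sql
-- Tables and columns whose names contain uppercase characters
select c.relname as table_name, a.attname as column_name
from pg_class c
join pg_namespace n on n.oid = c.relnamespace
join pg_attribute a on a.attrelid = c.oid and a.attnum > 0 and not a.attisdropped
where n.nspname = 'public'
  and c.relkind = 'r'
  and (c.relname ~ '[A-Z]' or a.attname ~ '[A-Z]');
```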
- -**Incorrect (mixed-case identifiers):** - -```sql --- Quoted identifiers preserve case but require quotes everywhere -CREATE TABLE "Users" ( - "userId" bigint PRIMARY KEY, - "firstName" text, - "lastName" text -); - --- Must always quote or queries fail -SELECT "firstName" FROM "Users" WHERE "userId" = 1; - --- This fails - Users becomes users without quotes -SELECT firstName FROM Users; --- ERROR: relation "users" does not exist -``` - -**Correct (lowercase snake_case):** - -```sql --- Unquoted lowercase identifiers are portable and tool-friendly -CREATE TABLE users ( - user_id bigint PRIMARY KEY, - first_name text, - last_name text -); - --- Works without quotes, recognized by all tools -SELECT first_name FROM users WHERE user_id = 1; -``` - -Common sources of mixed-case identifiers: - -```sql --- ORMs often generate quoted camelCase - configure them to use snake_case --- Migrations from other databases may preserve original casing --- Some GUI tools quote identifiers by default - disable this - --- If stuck with mixed-case, create views as a compatibility layer -CREATE VIEW users AS SELECT "userId" AS user_id, "firstName" AS first_name FROM "Users"; -``` - -Reference: [Identifiers and Key Words](https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS) diff --git a/.claude/skills/supabase-postgres-best-practices/references/schema-partitioning.md b/.claude/skills/supabase-postgres-best-practices/references/schema-partitioning.md deleted file mode 100644 index 13137a03..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/schema-partitioning.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Partition Large Tables for Better Performance -impact: MEDIUM-HIGH -impactDescription: 5-20x faster queries and maintenance on large tables -tags: partitioning, large-tables, time-series, performance ---- - -## Partition Large Tables for Better Performance - -Partitioning splits a large table into smaller pieces, improving query performance and maintenance operations. 
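A DEFAULT partition catches rows outside every defined range, and EXPLAIN confirms pruning works. A sketch assuming the partitioned `events` table and monthly partitions shown below:

```sql
-- Catch-all for rows that match no range partition
create table events_default partition of events default;

-- Verify partition pruning: only relevant partitions should appear in the plan
explain select * from events where created_at >= '2024-02-10';
-- Expect events_2024_02 and events_default, but not events_2024_01
```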
- -**Incorrect (single large table):** - -```sql -create table events ( - id bigint generated always as identity, - created_at timestamptz, - data jsonb -); - --- 500M rows, queries scan everything -select * from events where created_at > '2024-01-01'; -- Slow -vacuum events; -- Takes hours, locks table -``` - -**Correct (partitioned by time range):** - -```sql -create table events ( - id bigint generated always as identity, - created_at timestamptz not null, - data jsonb -) partition by range (created_at); - --- Create partitions for each month -create table events_2024_01 partition of events - for values from ('2024-01-01') to ('2024-02-01'); - -create table events_2024_02 partition of events - for values from ('2024-02-01') to ('2024-03-01'); - --- Queries only scan relevant partitions -select * from events where created_at > '2024-01-15'; -- Only scans events_2024_01+ - --- Drop old data instantly -drop table events_2023_01; -- Instant vs DELETE taking hours -``` - -When to partition: - -- Tables > 100M rows -- Time-series data with date-based queries -- Need to efficiently drop old data - -Reference: [Table Partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) diff --git a/.claude/skills/supabase-postgres-best-practices/references/schema-primary-keys.md b/.claude/skills/supabase-postgres-best-practices/references/schema-primary-keys.md deleted file mode 100644 index fb0fbb16..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/schema-primary-keys.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Select Optimal Primary Key Strategy -impact: HIGH -impactDescription: Better index locality, reduced fragmentation -tags: primary-key, identity, uuid, serial, schema ---- - -## Select Optimal Primary Key Strategy - -Primary key choice affects insert performance, index size, and replication -efficiency. 
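Index size differences between key strategies are easy to measure once data is loaded. A quick check; the index names assume the default `<table>_pkey` naming:

```sql
-- Compare primary key index sizes
select relname, pg_size_pretty(pg_relation_size(oid)) as index_size
from pg_class
where relname in ('users_pkey', 'orders_pkey')
order by relname;
```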
- -**Incorrect (problematic PK choices):** - -```sql --- identity is the SQL-standard approach -create table users ( - id serial primary key -- Works, but IDENTITY is recommended -); - --- Random UUIDs (v4) cause index fragmentation -create table orders ( - id uuid default gen_random_uuid() primary key -- UUIDv4 = random = scattered inserts -); -``` - -**Correct (optimal PK strategies):** - -```sql --- Use IDENTITY for sequential IDs (SQL-standard, best for most cases) -create table users ( - id bigint generated always as identity primary key -); - --- For distributed systems needing UUIDs, use UUIDv7 (time-ordered) --- Requires pg_uuidv7 extension: create extension pg_uuidv7; -create table orders ( - id uuid default uuid_generate_v7() primary key -- Time-ordered, no fragmentation -); - --- Alternative: time-prefixed IDs for sortable, distributed IDs (no extension needed) -create table events ( - id text default concat( - to_char(now() at time zone 'utc', 'YYYYMMDDHH24MISSMS'), - gen_random_uuid()::text - ) primary key -); -``` - -Guidelines: - -- Single database: `bigint identity` (sequential, 8 bytes, SQL-standard) -- Distributed/exposed IDs: UUIDv7 (requires pg_uuidv7) or ULID (time-ordered, no - fragmentation) -- `serial` works but `identity` is SQL-standard and preferred for new - applications -- Avoid random UUIDs (v4) as primary keys on large tables (causes index - fragmentation) - -Reference: -[Identity Columns](https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-PARMS-GENERATED-IDENTITY) diff --git a/.claude/skills/supabase-postgres-best-practices/references/security-privileges.md b/.claude/skills/supabase-postgres-best-practices/references/security-privileges.md deleted file mode 100644 index 448ec345..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/security-privileges.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Apply Principle of Least Privilege -impact: MEDIUM -impactDescription: Reduced attack surface, better audit trail -tags: privileges, security, roles, permissions ---- - -## Apply Principle of Least Privilege - -Grant only the minimum permissions required. Never use superuser for application queries. 
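Grants apply only to objects that already exist; default privileges make tables created later inherit the same minimal grants. A sketch; the role names match the illustrative examples below, and without FOR ROLE the statement affects objects created by the role running it:

```sql
-- Tables created later automatically get the same minimal grants
alter default privileges in schema public
  grant select on tables to app_readonly;

alter default privileges in schema public
  grant select, insert, update on tables to app_writer;
```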
- -**Incorrect (overly broad permissions):** - -```sql --- Application uses superuser connection --- Or grants ALL to application role -grant all privileges on all tables in schema public to app_user; -grant all privileges on all sequences in schema public to app_user; - --- Any SQL injection becomes catastrophic --- drop table users; cascades to everything -``` - -**Correct (minimal, specific grants):** - -```sql --- Create role with no default privileges -create role app_readonly nologin; - --- Grant only SELECT on specific tables -grant usage on schema public to app_readonly; -grant select on public.products, public.categories to app_readonly; - --- Create role for writes with limited scope -create role app_writer nologin; -grant usage on schema public to app_writer; -grant select, insert, update on public.orders to app_writer; -grant usage on sequence orders_id_seq to app_writer; --- No DELETE permission - --- Login role inherits from these -create role app_user login password 'xxx'; -grant app_writer to app_user; -``` - -Revoke public defaults: - -```sql --- Revoke default public access -revoke all on schema public from public; -revoke all on all tables in schema public from public; -``` - -Reference: [Roles and Privileges](https://supabase.com/blog/postgres-roles-and-privileges) diff --git a/.claude/skills/supabase-postgres-best-practices/references/security-rls-basics.md b/.claude/skills/supabase-postgres-best-practices/references/security-rls-basics.md deleted file mode 100644 index c61e1a85..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/security-rls-basics.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Enable Row Level Security for Multi-Tenant Data -impact: CRITICAL -impactDescription: Database-enforced tenant isolation, prevent data leaks -tags: rls, row-level-security, multi-tenant, security ---- - -## Enable Row Level Security for Multi-Tenant Data - -Row Level Security (RLS) enforces data access at the database level, ensuring users only see their own data. - -**Incorrect (application-level filtering only):** - -```sql --- Relying only on application to filter -select * from orders where user_id = $current_user_id; - --- Bug or bypass means all data is exposed! 
-select * from orders; -- Returns ALL orders -``` - -**Correct (database-enforced RLS):** - -```sql --- Enable RLS on the table -alter table orders enable row level security; - --- Create policy for users to see only their orders -create policy orders_user_policy on orders - for all - using (user_id = current_setting('app.current_user_id')::bigint); - --- Force RLS even for table owners -alter table orders force row level security; - --- Set user context and query -set app.current_user_id = '123'; -select * from orders; -- Only returns orders for user 123 -``` - -Policy for authenticated role: - -```sql -create policy orders_user_policy on orders - for all - to authenticated - using (user_id = auth.uid()); -``` - -Reference: [Row Level Security](https://supabase.com/docs/guides/database/postgres/row-level-security) diff --git a/.claude/skills/supabase-postgres-best-practices/references/security-rls-performance.md b/.claude/skills/supabase-postgres-best-practices/references/security-rls-performance.md deleted file mode 100644 index b32d92f7..00000000 --- a/.claude/skills/supabase-postgres-best-practices/references/security-rls-performance.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Optimize RLS Policies for Performance -impact: HIGH -impactDescription: 5-10x faster RLS queries with proper patterns -tags: rls, performance, security, optimization ---- - -## Optimize RLS Policies for Performance - -Poorly written RLS policies can cause severe performance issues. Use subqueries and indexes strategically. - -**Incorrect (function called for every row):** - -```sql -create policy orders_policy on orders - using (auth.uid() = user_id); -- auth.uid() called per row! - --- With 1M rows, auth.uid() is called 1M times -``` - -**Correct (wrap functions in SELECT):** - -```sql -create policy orders_policy on orders - using ((select auth.uid()) = user_id); -- Called once, cached - --- 100x+ faster on large tables -``` - -Use security definer functions for complex checks: - -```sql --- Create helper function (runs as definer, bypasses RLS) -create or replace function is_team_member(team_id bigint) -returns boolean -language sql -security definer -set search_path = '' -as $$ - select exists ( - select 1 from public.team_members - where team_id = $1 and user_id = (select auth.uid()) - ); -$$; - --- Use in policy (indexed lookup, not per-row check) -create policy team_orders_policy on orders - using ((select is_team_member(team_id))); -``` - -Always add indexes on columns used in RLS policies: - -```sql -create index orders_user_id_idx on orders (user_id); -``` - -Reference: [RLS Performance](https://supabase.com/docs/guides/database/postgres/row-level-security#rls-performance-recommendations) diff --git a/.continue/skills/supabase-postgres-best-practices/AGENTS.md b/.continue/skills/supabase-postgres-best-practices/AGENTS.md deleted file mode 100644 index a7baf445..00000000 --- a/.continue/skills/supabase-postgres-best-practices/AGENTS.md +++ /dev/null @@ -1,68 +0,0 @@ -# Supabase Postgres Best Practices - -## Structure - -``` -supabase-postgres-best-practices/ - SKILL.md # Main skill file - read this first - AGENTS.md # This navigation guide - CLAUDE.md # Symlink to AGENTS.md - references/ # Detailed reference files -``` - -## Usage - -1. Read `SKILL.md` for the main skill instructions -2. Browse `references/` for detailed documentation on specific topics -3. 
Reference files are loaded on-demand - read only what you need - -Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. - -## When to Apply - -Reference these guidelines when: -- Writing SQL queries or designing schemas -- Implementing indexes or query optimization -- Reviewing database performance issues -- Configuring connection pooling or scaling -- Optimizing for Postgres-specific features -- Working with Row-Level Security (RLS) - -## Rule Categories by Priority - -| Priority | Category | Impact | Prefix | -|----------|----------|--------|--------| -| 1 | Query Performance | CRITICAL | `query-` | -| 2 | Connection Management | CRITICAL | `conn-` | -| 3 | Security & RLS | CRITICAL | `security-` | -| 4 | Schema Design | HIGH | `schema-` | -| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | -| 6 | Data Access Patterns | MEDIUM | `data-` | -| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | -| 8 | Advanced Features | LOW | `advanced-` | - -## How to Use - -Read individual rule files for detailed explanations and SQL examples: - -``` -references/query-missing-indexes.md -references/schema-partial-indexes.md -references/_sections.md -``` - -Each rule file contains: -- Brief explanation of why it matters -- Incorrect SQL example with explanation -- Correct SQL example with explanation -- Optional EXPLAIN output or metrics -- Additional context and references -- Supabase-specific notes (when applicable) - -## References - -- https://www.postgresql.org/docs/current/ -- https://supabase.com/docs -- https://wiki.postgresql.org/wiki/Performance_Optimization -- https://supabase.com/docs/guides/database/overview -- https://supabase.com/docs/guides/auth/row-level-security diff --git a/.continue/skills/supabase-postgres-best-practices/CLAUDE.md b/.continue/skills/supabase-postgres-best-practices/CLAUDE.md deleted file mode 100644 index 47dc3e3d..00000000 --- a/.continue/skills/supabase-postgres-best-practices/CLAUDE.md +++ /dev/null @@ -1 +0,0 @@ -AGENTS.md \ No newline at end of file diff --git a/.continue/skills/supabase-postgres-best-practices/README.md b/.continue/skills/supabase-postgres-best-practices/README.md deleted file mode 100644 index f1a374e1..00000000 --- a/.continue/skills/supabase-postgres-best-practices/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# Supabase Postgres Best Practices - Contributor Guide - -This skill contains Postgres performance optimization references optimized for -AI agents and LLMs. It follows the [Agent Skills Open Standard](https://agentskills.io/). - -## Quick Start - -```bash -# From repository root -npm install - -# Validate existing references -npm run validate - -# Build AGENTS.md -npm run build -``` - -## Creating a New Reference - -1. **Choose a section prefix** based on the category: - - `query-` Query Performance (CRITICAL) - - `conn-` Connection Management (CRITICAL) - - `security-` Security & RLS (CRITICAL) - - `schema-` Schema Design (HIGH) - - `lock-` Concurrency & Locking (MEDIUM-HIGH) - - `data-` Data Access Patterns (MEDIUM) - - `monitor-` Monitoring & Diagnostics (LOW-MEDIUM) - - `advanced-` Advanced Features (LOW) - -2. **Copy the template**: - ```bash - cp references/_template.md references/query-your-reference-name.md - ``` - -3. **Fill in the content** following the template structure - -4. **Validate and build**: - ```bash - npm run validate - npm run build - ``` - -5. 
**Review** the generated `AGENTS.md` - -## Skill Structure - -``` -skills/supabase-postgres-best-practices/ -├── SKILL.md # Agent-facing skill manifest (Agent Skills spec) -├── AGENTS.md # [GENERATED] Compiled references document -├── README.md # This file -└── references/ - ├── _template.md # Reference template - ├── _sections.md # Section definitions - ├── _contributing.md # Writing guidelines - └── *.md # Individual references - -packages/skills-build/ -├── src/ # Generic build system source -└── package.json # NPM scripts -``` - -## Reference File Structure - -See `references/_template.md` for the complete template. Key elements: - -````markdown ---- -title: Clear, Action-Oriented Title -impact: CRITICAL|HIGH|MEDIUM-HIGH|MEDIUM|LOW-MEDIUM|LOW -impactDescription: Quantified benefit (e.g., "10-100x faster") -tags: relevant, keywords ---- - -## [Title] - -[1-2 sentence explanation] - -**Incorrect (description):** - -```sql --- Comment explaining what's wrong -[Bad SQL example] -``` -```` - -**Correct (description):** - -```sql --- Comment explaining why this is better -[Good SQL example] -``` - -## Writing Guidelines - -See `references/_contributing.md` for detailed guidelines. Key principles: - -1. **Show concrete transformations** - "Change X to Y", not abstract advice -2. **Error-first structure** - Show the problem before the solution -3. **Quantify impact** - Include specific metrics (10x faster, 50% smaller) -4. **Self-contained examples** - Complete, runnable SQL -5. **Semantic naming** - Use meaningful names (users, email), not (table1, col1) - -## Impact Levels - -| Level | Improvement | Examples | -|-------|-------------|----------| -| CRITICAL | 10-100x | Missing indexes, connection exhaustion | -| HIGH | 5-20x | Wrong index types, poor partitioning | -| MEDIUM-HIGH | 2-5x | N+1 queries, RLS optimization | -| MEDIUM | 1.5-3x | Redundant indexes, stale statistics | -| LOW-MEDIUM | 1.2-2x | VACUUM tuning, config tweaks | -| LOW | Incremental | Advanced patterns, edge cases | diff --git a/.continue/skills/supabase-postgres-best-practices/SKILL.md b/.continue/skills/supabase-postgres-best-practices/SKILL.md deleted file mode 100644 index f80be156..00000000 --- a/.continue/skills/supabase-postgres-best-practices/SKILL.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -name: supabase-postgres-best-practices -description: Postgres performance optimization and best practices from Supabase. Use this skill when writing, reviewing, or optimizing Postgres queries, schema designs, or database configurations. -license: MIT -metadata: - author: supabase - version: "1.1.0" - organization: Supabase - date: January 2026 - abstract: Comprehensive Postgres performance optimization guide for developers using Supabase and Postgres. Contains performance rules across 8 categories, prioritized by impact from critical (query performance, connection management) to incremental (advanced features). Each rule includes detailed explanations, incorrect vs. correct SQL examples, query plan analysis, and specific performance metrics to guide automated optimization and code generation. ---- - -# Supabase Postgres Best Practices - -Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design.
- -## When to Apply - -Reference these guidelines when: -- Writing SQL queries or designing schemas -- Implementing indexes or query optimization -- Reviewing database performance issues -- Configuring connection pooling or scaling -- Optimizing for Postgres-specific features -- Working with Row-Level Security (RLS) - -## Rule Categories by Priority - -| Priority | Category | Impact | Prefix | -|----------|----------|--------|--------| -| 1 | Query Performance | CRITICAL | `query-` | -| 2 | Connection Management | CRITICAL | `conn-` | -| 3 | Security & RLS | CRITICAL | `security-` | -| 4 | Schema Design | HIGH | `schema-` | -| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | -| 6 | Data Access Patterns | MEDIUM | `data-` | -| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | -| 8 | Advanced Features | LOW | `advanced-` | - -## How to Use - -Read individual rule files for detailed explanations and SQL examples: - -``` -references/query-missing-indexes.md -references/schema-partial-indexes.md -references/_sections.md -``` - -Each rule file contains: -- Brief explanation of why it matters -- Incorrect SQL example with explanation -- Correct SQL example with explanation -- Optional EXPLAIN output or metrics -- Additional context and references -- Supabase-specific notes (when applicable) - -## References - -- https://www.postgresql.org/docs/current/ -- https://supabase.com/docs -- https://wiki.postgresql.org/wiki/Performance_Optimization -- https://supabase.com/docs/guides/database/overview -- https://supabase.com/docs/guides/auth/row-level-security diff --git a/.continue/skills/supabase-postgres-best-practices/references/_contributing.md b/.continue/skills/supabase-postgres-best-practices/references/_contributing.md deleted file mode 100644 index 10de8ecb..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/_contributing.md +++ /dev/null @@ -1,171 +0,0 @@ -# Writing Guidelines for Postgres References - -This document provides guidelines for creating effective Postgres best -practice references that work well with AI agents and LLMs. - -## Key Principles - -### 1. Concrete Transformation Patterns - -Show exact SQL rewrites. Avoid philosophical advice. - -**Good:** "Use `WHERE id = ANY(ARRAY[...])` instead of -`WHERE id IN (SELECT ...)`" **Bad:** "Design good schemas" - -### 2. Error-First Structure - -Always show the problematic pattern first, then the solution. This trains agents -to recognize anti-patterns. - -```markdown -**Incorrect (sequential queries):** [bad example] - -**Correct (batched query):** [good example] -``` - -### 3. Quantified Impact - -Include specific metrics. Helps agents prioritize fixes. - -**Good:** "10x faster queries", "50% smaller index", "Eliminates N+1" -**Bad:** "Faster", "Better", "More efficient" - -### 4. Self-Contained Examples - -Examples should be complete and runnable (or close to it). Include `CREATE TABLE` -if context is needed. - -```sql --- Include table definition when needed for clarity -CREATE TABLE users ( - id bigint PRIMARY KEY, - email text NOT NULL, - deleted_at timestamptz -); - --- Now show the index -CREATE INDEX users_active_email_idx ON users(email) WHERE deleted_at IS NULL; -``` - -### 5. Semantic Naming - -Use meaningful table/column names. Names carry intent for LLMs. 
- -**Good:** `users`, `email`, `created_at`, `is_active` -**Bad:** `table1`, `col1`, `field`, `flag` - ---- - -## Code Example Standards - -### SQL Formatting - -```sql --- Use lowercase keywords, clear formatting -CREATE INDEX CONCURRENTLY users_email_idx - ON users(email) - WHERE deleted_at IS NULL; - --- Not cramped or ALL CAPS -CREATE INDEX CONCURRENTLY USERS_EMAIL_IDX ON USERS(EMAIL) WHERE DELETED_AT IS NULL; -``` - -### Comments - -- Explain _why_, not _what_ -- Highlight performance implications -- Point out common pitfalls - -### Language Tags - -- `sql` - Standard SQL queries -- `plpgsql` - Stored procedures/functions -- `typescript` - Application code (when needed) -- `python` - Application code (when needed) - ---- - -## When to Include Application Code - -**Default: SQL Only** - -Most references should focus on pure SQL patterns. This keeps examples portable. - -**Include Application Code When:** - -- Connection pooling configuration -- Transaction management in application context -- ORM anti-patterns (N+1 in Prisma/TypeORM) -- Prepared statement usage - -**Format for Mixed Examples:** - -````markdown -**Incorrect (N+1 in application):** - -```typescript -for (const user of users) { - const posts = await db.query("SELECT * FROM posts WHERE user_id = $1", [ - user.id, - ]); -} -``` -```` - -**Correct (batch query):** - -```typescript -const posts = await db.query("SELECT * FROM posts WHERE user_id = ANY($1)", [ - userIds, -]); -``` - ---- - -## Impact Level Guidelines - -| Level | Improvement | Use When | |-------|-------------|----------| | **CRITICAL** | 10-100x | Missing indexes, connection exhaustion, sequential scans on large tables | | **HIGH** | 5-20x | Wrong index types, poor partitioning, missing covering indexes | | **MEDIUM-HIGH** | 2-5x | N+1 queries, inefficient pagination, RLS optimization | | **MEDIUM** | 1.5-3x | Redundant indexes, query plan instability | | **LOW-MEDIUM** | 1.2-2x | VACUUM tuning, configuration tweaks | | **LOW** | Incremental | Advanced patterns, edge cases | - ---- - -## Reference Standards - -**Primary Sources:** - -- Official Postgres documentation -- Supabase documentation -- Postgres wiki -- Established blogs (2ndQuadrant, Crunchy Data) - -**Format:** - -```markdown -Reference: -[Postgres Indexes](https://www.postgresql.org/docs/current/indexes.html) -``` - ---- - -## Review Checklist - -Before submitting a reference: - -- [ ] Title is clear and action-oriented -- [ ] Impact level matches the performance gain -- [ ] impactDescription includes quantification -- [ ] Explanation is concise (1-2 sentences) -- [ ] Has at least 1 **Incorrect** SQL example -- [ ] Has at least 1 **Correct** SQL example -- [ ] SQL uses semantic naming -- [ ] Comments explain _why_, not _what_ -- [ ] Trade-offs mentioned if applicable -- [ ] Reference links included -- [ ] `npm run validate` passes -- [ ] `npm run build` generates correct output diff --git a/.continue/skills/supabase-postgres-best-practices/references/_sections.md b/.continue/skills/supabase-postgres-best-practices/references/_sections.md deleted file mode 100644 index 8ba57c23..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/_sections.md +++ /dev/null @@ -1,39 +0,0 @@ -# Section Definitions - -This file defines the rule categories for Postgres best practices. Rules are automatically assigned to sections based on their filename prefix. -
---- - -## 1. Query Performance (query) -**Impact:** CRITICAL -**Description:** Slow queries, missing indexes, inefficient query plans. The most common source of Postgres performance issues. - -## 2. Connection Management (conn) -**Impact:** CRITICAL -**Description:** Connection pooling, limits, and serverless strategies. Critical for applications with high concurrency or serverless deployments. - -## 3. Security & RLS (security) -**Impact:** CRITICAL -**Description:** Row-Level Security policies, privilege management, and authentication patterns. - -## 4. Schema Design (schema) -**Impact:** HIGH -**Description:** Table design, index strategies, partitioning, and data type selection. Foundation for long-term performance. - -## 5. Concurrency & Locking (lock) -**Impact:** MEDIUM-HIGH -**Description:** Transaction management, isolation levels, deadlock prevention, and lock contention patterns. - -## 6. Data Access Patterns (data) -**Impact:** MEDIUM -**Description:** N+1 query elimination, batch operations, cursor-based pagination, and efficient data fetching. - -## 7. Monitoring & Diagnostics (monitor) -**Impact:** LOW-MEDIUM -**Description:** Using pg_stat_statements, EXPLAIN ANALYZE, metrics collection, and performance diagnostics. - -## 8. Advanced Features (advanced) -**Impact:** LOW -**Description:** Full-text search, JSONB optimization, PostGIS, extensions, and advanced Postgres features. diff --git a/.continue/skills/supabase-postgres-best-practices/references/_template.md b/.continue/skills/supabase-postgres-best-practices/references/_template.md deleted file mode 100644 index 91ace90e..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/_template.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Clear, Action-Oriented Title (e.g., "Use Partial Indexes for Filtered Queries") -impact: MEDIUM -impactDescription: 5-20x query speedup for filtered queries -tags: indexes, query-optimization, performance ---- - -## [Rule Title] - -[1-2 sentence explanation of the problem and why it matters. Focus on performance impact.] - -**Incorrect (describe the problem):** - -```sql --- Comment explaining what makes this slow/problematic -CREATE INDEX users_email_idx ON users(email); - -SELECT * FROM users WHERE email = 'user@example.com' AND deleted_at IS NULL; --- This scans deleted records unnecessarily -``` - -**Correct (describe the solution):** - -```sql --- Comment explaining why this is better -CREATE INDEX users_active_email_idx ON users(email) WHERE deleted_at IS NULL; - -SELECT * FROM users WHERE email = 'user@example.com' AND deleted_at IS NULL; --- Only indexes active users, 10x smaller index, faster queries -``` - -[Optional: Additional context, edge cases, or trade-offs] - -Reference: [Postgres Docs](https://www.postgresql.org/docs/current/) diff --git a/.continue/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md b/.continue/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md deleted file mode 100644 index 582cbeaa..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Use tsvector for Full-Text Search -impact: MEDIUM -impactDescription: 100x faster than LIKE, with ranking support -tags: full-text-search, tsvector, gin, search ---- - -## Use tsvector for Full-Text Search - -LIKE with wildcards can't use indexes.
Full-text search with tsvector is orders of magnitude faster. - -**Incorrect (LIKE pattern matching):** - -```sql --- Cannot use index, scans all rows -select * from articles where content like '%postgresql%'; - --- Case-insensitive makes it worse -select * from articles where lower(content) like '%postgresql%'; -``` - -**Correct (full-text search with tsvector):** - -```sql --- Add tsvector column and index -alter table articles add column search_vector tsvector - generated always as (to_tsvector('english', coalesce(title,'') || ' ' || coalesce(content,''))) stored; - -create index articles_search_idx on articles using gin (search_vector); - --- Fast full-text search -select * from articles -where search_vector @@ to_tsquery('english', 'postgresql & performance'); - --- With ranking -select *, ts_rank(search_vector, query) as rank -from articles, to_tsquery('english', 'postgresql') query -where search_vector @@ query -order by rank desc; -``` - -Search multiple terms: - -```sql --- AND: both terms required -to_tsquery('postgresql & performance') - --- OR: either term -to_tsquery('postgresql | mysql') - --- Prefix matching -to_tsquery('post:*') -``` - -Reference: [Full Text Search](https://supabase.com/docs/guides/database/full-text-search) diff --git a/.continue/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md b/.continue/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md deleted file mode 100644 index e3d261ea..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Index JSONB Columns for Efficient Querying -impact: MEDIUM -impactDescription: 10-100x faster JSONB queries with proper indexing -tags: jsonb, gin, indexes, json ---- - -## Index JSONB Columns for Efficient Querying - -JSONB queries without indexes scan the entire table. Use GIN indexes for containment queries. 
- -**Incorrect (no index on JSONB):** - -```sql -create table products ( - id bigint primary key, - attributes jsonb -); - --- Full table scan for every query -select * from products where attributes @> '{"color": "red"}'; -select * from products where attributes->>'brand' = 'Nike'; -``` - -**Correct (GIN index for JSONB):** - -```sql --- GIN index for containment operators (@>, ?, ?&, ?|) -create index products_attrs_gin on products using gin (attributes); - --- Now containment queries use the index -select * from products where attributes @> '{"color": "red"}'; - --- For specific key lookups, use expression index -create index products_brand_idx on products ((attributes->>'brand')); -select * from products where attributes->>'brand' = 'Nike'; -``` - -Choose the right operator class: - -```sql --- jsonb_ops (default): supports all operators, larger index -create index idx1 on products using gin (attributes); - --- jsonb_path_ops: only @> operator, but 2-3x smaller index -create index idx2 on products using gin (attributes jsonb_path_ops); -``` - -Reference: [JSONB Indexes](https://www.postgresql.org/docs/current/datatype-json.html#JSON-INDEXING) diff --git a/.continue/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md b/.continue/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md deleted file mode 100644 index 40b9cc50..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Configure Idle Connection Timeouts -impact: HIGH -impactDescription: Reclaim 30-50% of connection slots from idle clients -tags: connections, timeout, idle, resource-management ---- - -## Configure Idle Connection Timeouts - -Idle connections waste resources. Configure timeouts to automatically reclaim them. - -**Incorrect (connections held indefinitely):** - -```sql --- No timeout configured -show idle_in_transaction_session_timeout; -- 0 (disabled) - --- Connections stay open forever, even when idle -select pid, state, state_change, query -from pg_stat_activity -where state = 'idle in transaction'; --- Shows transactions idle for hours, holding locks -``` - -**Correct (automatic cleanup of idle connections):** - -```sql --- Terminate connections idle in transaction after 30 seconds -alter system set idle_in_transaction_session_timeout = '30s'; - --- Terminate completely idle connections after 10 minutes -alter system set idle_session_timeout = '10min'; - --- Reload configuration -select pg_reload_conf(); -``` - -For pooled connections, configure at the pooler level: - -```ini -# pgbouncer.ini -server_idle_timeout = 60 -client_idle_timeout = 300 -``` - -Reference: [Connection Timeouts](https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT) diff --git a/.continue/skills/supabase-postgres-best-practices/references/conn-limits.md b/.continue/skills/supabase-postgres-best-practices/references/conn-limits.md deleted file mode 100644 index cb3e400c..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/conn-limits.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Set Appropriate Connection Limits -impact: CRITICAL -impactDescription: Prevent database crashes and memory exhaustion -tags: connections, max-connections, limits, stability ---- - -## Set Appropriate Connection Limits - -Too many connections exhaust memory and degrade performance. Set limits based on available resources. 
- -**Incorrect (unlimited or excessive connections):** - -```sql --- Default max_connections = 100, but often increased blindly -show max_connections; -- 500 (way too high for 4GB RAM) - --- Each connection uses 1-3MB RAM --- 500 connections * 2MB = 1GB just for connections! --- Out of memory errors under load -``` - -**Correct (calculate based on resources):** - -```sql --- Formula: max_connections = (RAM in MB / 5MB per connection) - reserved --- For 4GB RAM: (4096 / 5) - 10 = ~800 theoretical max --- But practically, 100-200 is better for query performance - --- Recommended settings for 4GB RAM -alter system set max_connections = 100; - --- Also set work_mem appropriately --- work_mem * max_connections should not exceed 25% of RAM -alter system set work_mem = '8MB'; -- 8MB * 100 = 800MB max -``` - -Monitor connection usage: - -```sql -select count(*), state from pg_stat_activity group by state; -``` - -Reference: [Database Connections](https://supabase.com/docs/guides/platform/performance#connection-management) diff --git a/.continue/skills/supabase-postgres-best-practices/references/conn-pooling.md b/.continue/skills/supabase-postgres-best-practices/references/conn-pooling.md deleted file mode 100644 index e2ebd581..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/conn-pooling.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Use Connection Pooling for All Applications -impact: CRITICAL -impactDescription: Handle 10-100x more concurrent users -tags: connection-pooling, pgbouncer, performance, scalability ---- - -## Use Connection Pooling for All Applications - -Postgres connections are expensive (1-3MB RAM each). Without pooling, applications exhaust connections under load. - -**Incorrect (new connection per request):** - -```sql --- Each request creates a new connection --- Application code: db.connect() per request --- Result: 500 concurrent users = 500 connections = crashed database - --- Check current connections -select count(*) from pg_stat_activity; -- 487 connections! -``` - -**Correct (connection pooling):** - -```sql --- Use a pooler like PgBouncer between app and database --- Application connects to pooler, pooler reuses a small pool to Postgres - --- Configure pool_size based on: (CPU cores * 2) + spindle_count --- Example for 4 cores: pool_size = 10 - --- Result: 500 concurrent users share 10 actual connections -select count(*) from pg_stat_activity; -- 10 connections -``` - -Pool modes: - -- **Transaction mode**: connection returned after each transaction (best for most apps) -- **Session mode**: connection held for entire session (needed for prepared statements, temp tables) - -Reference: [Connection Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pooler) diff --git a/.continue/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md b/.continue/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md deleted file mode 100644 index 555547d8..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Use Prepared Statements Correctly with Pooling -impact: HIGH -impactDescription: Avoid prepared statement conflicts in pooled environments -tags: prepared-statements, connection-pooling, transaction-mode ---- - -## Use Prepared Statements Correctly with Pooling - -Prepared statements are tied to individual database connections. 
In transaction-mode pooling, connections are shared, causing conflicts. - -**Incorrect (named prepared statements with transaction pooling):** - -```sql --- Named prepared statement -prepare get_user as select * from users where id = $1; - --- In transaction mode pooling, next request may get different connection -execute get_user(123); --- ERROR: prepared statement "get_user" does not exist -``` - -**Correct (use unnamed statements or session mode):** - -```sql --- Option 1: Use unnamed prepared statements (most ORMs do this automatically) --- The query is prepared and executed in a single protocol message - --- Option 2: Deallocate after use in transaction mode -prepare get_user as select * from users where id = $1; -execute get_user(123); -deallocate get_user; - --- Option 3: Use session mode pooling (port 5432 vs 6543) --- Connection is held for entire session, prepared statements persist -``` - -Check your driver settings: - -```sql --- Many drivers use prepared statements by default --- Node.js pg: { prepare: false } to disable --- JDBC: prepareThreshold=0 to disable -``` - -Reference: [Prepared Statements with Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pool-modes) diff --git a/.continue/skills/supabase-postgres-best-practices/references/data-batch-inserts.md b/.continue/skills/supabase-postgres-best-practices/references/data-batch-inserts.md deleted file mode 100644 index 997947cb..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/data-batch-inserts.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Batch INSERT Statements for Bulk Data -impact: MEDIUM -impactDescription: 10-50x faster bulk inserts -tags: batch, insert, bulk, performance, copy ---- - -## Batch INSERT Statements for Bulk Data - -Individual INSERT statements have high overhead. Batch multiple rows in single statements or use COPY. - -**Incorrect (individual inserts):** - -```sql --- Each insert is a separate transaction and round trip -insert into events (user_id, action) values (1, 'click'); -insert into events (user_id, action) values (1, 'view'); -insert into events (user_id, action) values (2, 'click'); --- ... 1000 more individual inserts - --- 1000 inserts = 1000 round trips = slow -``` - -**Correct (batch insert):** - -```sql --- Multiple rows in single statement -insert into events (user_id, action) values - (1, 'click'), - (1, 'view'), - (2, 'click'), - -- ... up to ~1000 rows per batch - (999, 'view'); - --- One round trip for 1000 rows -``` - -For large imports, use COPY: - -```sql --- COPY is fastest for bulk loading -copy events (user_id, action, created_at) -from '/path/to/data.csv' -with (format csv, header true); - --- Or from stdin in application -copy events (user_id, action) from stdin with (format csv); -1,click -1,view -2,click -\. -``` - -Reference: [COPY](https://www.postgresql.org/docs/current/sql-copy.html) diff --git a/.continue/skills/supabase-postgres-best-practices/references/data-n-plus-one.md b/.continue/skills/supabase-postgres-best-practices/references/data-n-plus-one.md deleted file mode 100644 index 2109186f..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/data-n-plus-one.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Eliminate N+1 Queries with Batch Loading -impact: MEDIUM-HIGH -impactDescription: 10-100x fewer database round trips -tags: n-plus-one, batch, performance, queries ---- - -## Eliminate N+1 Queries with Batch Loading - -N+1 queries execute one query per item in a loop. 
Batch them into a single query using arrays or JOINs. - -**Incorrect (N+1 queries):** - -```sql --- First query: get all users -select id from users where active = true; -- Returns 100 IDs - --- Then N queries, one per user -select * from orders where user_id = 1; -select * from orders where user_id = 2; -select * from orders where user_id = 3; --- ... 97 more queries! - --- Total: 101 round trips to database -``` - -**Correct (single batch query):** - -```sql --- Collect IDs and query once with ANY -select * from orders where user_id = any(array[1, 2, 3, ...]); - --- Or use JOIN instead of loop -select u.id, u.name, o.* -from users u -left join orders o on o.user_id = u.id -where u.active = true; - --- Total: 1 round trip -``` - -Application pattern: - -```sql --- Instead of looping in application code: --- for user in users: db.query("SELECT * FROM orders WHERE user_id = $1", user.id) - --- Pass array parameter: -select * from orders where user_id = any($1::bigint[]); --- Application passes: [1, 2, 3, 4, 5, ...] -``` - -Reference: [N+1 Query Problem](https://supabase.com/docs/guides/database/query-optimization) diff --git a/.continue/skills/supabase-postgres-best-practices/references/data-pagination.md b/.continue/skills/supabase-postgres-best-practices/references/data-pagination.md deleted file mode 100644 index 633d8393..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/data-pagination.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Use Cursor-Based Pagination Instead of OFFSET -impact: MEDIUM-HIGH -impactDescription: Consistent O(1) performance regardless of page depth -tags: pagination, cursor, keyset, offset, performance ---- - -## Use Cursor-Based Pagination Instead of OFFSET - -OFFSET-based pagination scans all skipped rows, getting slower on deeper pages. Cursor pagination is O(1). - -**Incorrect (OFFSET pagination):** - -```sql --- Page 1: scans 20 rows -select * from products order by id limit 20 offset 0; - --- Page 100: scans 2000 rows to skip 1980 -select * from products order by id limit 20 offset 1980; - --- Page 10000: scans 200,000 rows! -select * from products order by id limit 20 offset 199980; -``` - -**Correct (cursor/keyset pagination):** - -```sql --- Page 1: get first 20 -select * from products order by id limit 20; --- Application stores last_id = 20 - --- Page 2: start after last ID -select * from products where id > 20 order by id limit 20; --- Uses index, always fast regardless of page depth - --- Page 10000: same speed as page 1 -select * from products where id > 199980 order by id limit 20; -``` - -For multi-column sorting: - -```sql --- Cursor must include all sort columns -select * from products -where (created_at, id) > ('2024-01-15 10:00:00', 12345) -order by created_at, id -limit 20; -``` - -Reference: [Pagination](https://supabase.com/docs/guides/database/pagination) diff --git a/.continue/skills/supabase-postgres-best-practices/references/data-upsert.md b/.continue/skills/supabase-postgres-best-practices/references/data-upsert.md deleted file mode 100644 index bc95e230..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/data-upsert.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Use UPSERT for Insert-or-Update Operations -impact: MEDIUM -impactDescription: Atomic operation, eliminates race conditions -tags: upsert, on-conflict, insert, update ---- - -## Use UPSERT for Insert-or-Update Operations - -Using separate SELECT-then-INSERT/UPDATE creates race conditions. Use INSERT ... 
ON CONFLICT for atomic upserts. - -**Incorrect (check-then-insert race condition):** - -```sql --- Race condition: two requests check simultaneously -select * from settings where user_id = 123 and key = 'theme'; --- Both find nothing - --- Both try to insert -insert into settings (user_id, key, value) values (123, 'theme', 'dark'); --- One succeeds, one fails with duplicate key error! -``` - -**Correct (atomic UPSERT):** - -```sql --- Single atomic operation -insert into settings (user_id, key, value) -values (123, 'theme', 'dark') -on conflict (user_id, key) -do update set value = excluded.value, updated_at = now(); - --- Returns the inserted/updated row -insert into settings (user_id, key, value) -values (123, 'theme', 'dark') -on conflict (user_id, key) -do update set value = excluded.value -returning *; -``` - -Insert-or-ignore pattern: - -```sql --- Insert only if not exists (no update) -insert into page_views (page_id, user_id) -values (1, 123) -on conflict (page_id, user_id) do nothing; -``` - -Reference: [INSERT ON CONFLICT](https://www.postgresql.org/docs/current/sql-insert.html#SQL-ON-CONFLICT) diff --git a/.continue/skills/supabase-postgres-best-practices/references/lock-advisory.md b/.continue/skills/supabase-postgres-best-practices/references/lock-advisory.md deleted file mode 100644 index 572eaf0d..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/lock-advisory.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Use Advisory Locks for Application-Level Locking -impact: MEDIUM -impactDescription: Efficient coordination without row-level lock overhead -tags: advisory-locks, coordination, application-locks ---- - -## Use Advisory Locks for Application-Level Locking - -Advisory locks provide application-level coordination without requiring database rows to lock. - -**Incorrect (creating rows just for locking):** - -```sql --- Creating dummy rows to lock on -create table resource_locks ( - resource_name text primary key -); - -insert into resource_locks values ('report_generator'); - --- Lock by selecting the row -select * from resource_locks where resource_name = 'report_generator' for update; -``` - -**Correct (advisory locks):** - -```sql --- Session-level advisory lock (released on disconnect or unlock) -select pg_advisory_lock(hashtext('report_generator')); --- ... do exclusive work ... -select pg_advisory_unlock(hashtext('report_generator')); - --- Transaction-level lock (released on commit/rollback) -begin; -select pg_advisory_xact_lock(hashtext('daily_report')); --- ... do work ... 
-commit; -- Lock automatically released -``` - -Try-lock for non-blocking operations: - -```sql --- Returns immediately with true/false instead of waiting -select pg_try_advisory_lock(hashtext('resource_name')); - --- Use in application -if (acquired) { - -- Do work - select pg_advisory_unlock(hashtext('resource_name')); -} else { - -- Skip or retry later -} -``` - -Reference: [Advisory Locks](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS) diff --git a/.continue/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md b/.continue/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md deleted file mode 100644 index 974da5ed..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Prevent Deadlocks with Consistent Lock Ordering -impact: MEDIUM-HIGH -impactDescription: Eliminate deadlock errors, improve reliability -tags: deadlocks, locking, transactions, ordering ---- - -## Prevent Deadlocks with Consistent Lock Ordering - -Deadlocks occur when transactions lock resources in different orders. Always -acquire locks in a consistent order. - -**Incorrect (inconsistent lock ordering):** - -```sql --- Transaction A -- Transaction B -begin; begin; -update accounts update accounts -set balance = balance - 100 set balance = balance - 50 -where id = 1; where id = 2; -- B locks row 2 - -update accounts update accounts -set balance = balance + 100 set balance = balance + 50 -where id = 2; -- A waits for B where id = 1; -- B waits for A - --- DEADLOCK! Both waiting for each other -``` - -**Correct (lock rows in consistent order first):** - -```sql --- Explicitly acquire locks in ID order before updating -begin; -select * from accounts where id in (1, 2) order by id for update; - --- Now perform updates in any order - locks already held -update accounts set balance = balance - 100 where id = 1; -update accounts set balance = balance + 100 where id = 2; -commit; -``` - -Alternative: use a single statement to update atomically: - -```sql --- Single statement acquires all locks atomically -begin; -update accounts -set balance = balance + case id - when 1 then -100 - when 2 then 100 -end -where id in (1, 2); -commit; -``` - -Detect deadlocks in logs: - -```sql --- Check for recent deadlocks -select * from pg_stat_database where deadlocks > 0; - --- Enable deadlock logging -set log_lock_waits = on; -set deadlock_timeout = '1s'; -``` - -Reference: -[Deadlocks](https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-DEADLOCKS) diff --git a/.continue/skills/supabase-postgres-best-practices/references/lock-short-transactions.md b/.continue/skills/supabase-postgres-best-practices/references/lock-short-transactions.md deleted file mode 100644 index e6b8ef26..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/lock-short-transactions.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Keep Transactions Short to Reduce Lock Contention -impact: MEDIUM-HIGH -impactDescription: 3-5x throughput improvement, fewer deadlocks -tags: transactions, locking, contention, performance ---- - -## Keep Transactions Short to Reduce Lock Contention - -Long-running transactions hold locks that block other queries. Keep transactions as short as possible. 
- -**Incorrect (long transaction with external calls):** - -```sql -begin; -select * from orders where id = 1 for update; -- Lock acquired - --- Application makes HTTP call to payment API (2-5 seconds) --- Other queries on this row are blocked! - -update orders set status = 'paid' where id = 1; -commit; -- Lock held for entire duration -``` - -**Correct (minimal transaction scope):** - -```sql --- Validate data and call APIs outside transaction --- Application: response = await paymentAPI.charge(...) - --- Only hold lock for the actual update -begin; -update orders -set status = 'paid', payment_id = $1 -where id = $2 and status = 'pending' -returning *; -commit; -- Lock held for milliseconds -``` - -Use `statement_timeout` to prevent runaway transactions: - -```sql --- Abort queries running longer than 30 seconds -set statement_timeout = '30s'; - --- Or per-session -set local statement_timeout = '5s'; -``` - -Reference: [Transaction Management](https://www.postgresql.org/docs/current/tutorial-transactions.html) diff --git a/.continue/skills/supabase-postgres-best-practices/references/lock-skip-locked.md b/.continue/skills/supabase-postgres-best-practices/references/lock-skip-locked.md deleted file mode 100644 index 77bdbb97..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/lock-skip-locked.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Use SKIP LOCKED for Non-Blocking Queue Processing -impact: MEDIUM-HIGH -impactDescription: 10x throughput for worker queues -tags: skip-locked, queue, workers, concurrency ---- - -## Use SKIP LOCKED for Non-Blocking Queue Processing - -When multiple workers process a queue, SKIP LOCKED allows workers to process different rows without waiting. - -**Incorrect (workers block each other):** - -```sql --- Worker 1 and Worker 2 both try to get next job -begin; -select * from jobs where status = 'pending' order by created_at limit 1 for update; --- Worker 2 waits for Worker 1's lock to release! -``` - -**Correct (SKIP LOCKED for parallel processing):** - -```sql --- Each worker skips locked rows and gets the next available -begin; -select * from jobs -where status = 'pending' -order by created_at -limit 1 -for update skip locked; - --- Worker 1 gets job 1, Worker 2 gets job 2 (no waiting) - -update jobs set status = 'processing' where id = $1; -commit; -``` - -Complete queue pattern: - -```sql --- Atomic claim-and-update in one statement -update jobs -set status = 'processing', worker_id = $1, started_at = now() -where id = ( - select id from jobs - where status = 'pending' - order by created_at - limit 1 - for update skip locked -) -returning *; -``` - -Reference: [SELECT FOR UPDATE SKIP LOCKED](https://www.postgresql.org/docs/current/sql-select.html#SQL-FOR-UPDATE-SHARE) diff --git a/.continue/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md b/.continue/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md deleted file mode 100644 index 542978c3..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Use EXPLAIN ANALYZE to Diagnose Slow Queries -impact: LOW-MEDIUM -impactDescription: Identify exact bottlenecks in query execution -tags: explain, analyze, diagnostics, query-plan ---- - -## Use EXPLAIN ANALYZE to Diagnose Slow Queries - -EXPLAIN ANALYZE executes the query and shows actual timings, revealing the true performance bottlenecks. 
- -**Incorrect (guessing at performance issues):** - -```sql --- Query is slow, but why? -select * from orders where customer_id = 123 and status = 'pending'; --- "It must be missing an index" - but which one? -``` - -**Correct (use EXPLAIN ANALYZE):** - -```sql -explain (analyze, buffers, format text) -select * from orders where customer_id = 123 and status = 'pending'; - --- Output reveals the issue: --- Seq Scan on orders (cost=0.00..25000.00 rows=50 width=100) (actual time=0.015..450.123 rows=50 loops=1) --- Filter: ((customer_id = 123) AND (status = 'pending'::text)) --- Rows Removed by Filter: 999950 --- Buffers: shared hit=5000 read=15000 --- Planning Time: 0.150 ms --- Execution Time: 450.500 ms -``` - -Key things to look for: - -```sql --- Seq Scan on large tables = missing index --- Rows Removed by Filter = poor selectivity or missing index --- Buffers: read >> hit = data not cached, needs more memory --- Nested Loop with high loops = consider different join strategy --- Sort Method: external merge = work_mem too low -``` - -Reference: [EXPLAIN](https://supabase.com/docs/guides/database/inspect) diff --git a/.continue/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md b/.continue/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md deleted file mode 100644 index d7e82f1a..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Enable pg_stat_statements for Query Analysis -impact: LOW-MEDIUM -impactDescription: Identify top resource-consuming queries -tags: pg-stat-statements, monitoring, statistics, performance ---- - -## Enable pg_stat_statements for Query Analysis - -pg_stat_statements tracks execution statistics for all queries, helping identify slow and frequent queries. - -**Incorrect (no visibility into query patterns):** - -```sql --- Database is slow, but which queries are the problem? 
--- No way to know without pg_stat_statements -``` - -**Correct (enable and query pg_stat_statements):** - -```sql --- Enable the extension -create extension if not exists pg_stat_statements; - --- Find slowest queries by total time -select - calls, - round(total_exec_time::numeric, 2) as total_time_ms, - round(mean_exec_time::numeric, 2) as mean_time_ms, - query -from pg_stat_statements -order by total_exec_time desc -limit 10; - --- Find most frequent queries -select calls, query -from pg_stat_statements -order by calls desc -limit 10; - --- Reset statistics after optimization -select pg_stat_statements_reset(); -``` - -Key metrics to monitor: - -```sql --- Queries with high mean time (candidates for optimization) -select query, mean_exec_time, calls -from pg_stat_statements -where mean_exec_time > 100 -- > 100ms average -order by mean_exec_time desc; -``` - -Reference: [pg_stat_statements](https://supabase.com/docs/guides/database/extensions/pg_stat_statements) diff --git a/.continue/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md b/.continue/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md deleted file mode 100644 index e0e8ea0b..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Maintain Table Statistics with VACUUM and ANALYZE -impact: MEDIUM -impactDescription: 2-10x better query plans with accurate statistics -tags: vacuum, analyze, statistics, maintenance, autovacuum ---- - -## Maintain Table Statistics with VACUUM and ANALYZE - -Outdated statistics cause the query planner to make poor decisions. VACUUM reclaims space, ANALYZE updates statistics. - -**Incorrect (stale statistics):** - -```sql --- Table has 1M rows but stats say 1000 --- Query planner chooses wrong strategy -explain select * from orders where status = 'pending'; --- Shows: Seq Scan (because stats show small table) --- Actually: Index Scan would be much faster -``` - -**Correct (maintain fresh statistics):** - -```sql --- Manually analyze after large data changes -analyze orders; - --- Analyze specific columns used in WHERE clauses -analyze orders (status, created_at); - --- Check when tables were last analyzed -select - relname, - last_vacuum, - last_autovacuum, - last_analyze, - last_autoanalyze -from pg_stat_user_tables -order by last_analyze nulls first; -``` - -Autovacuum tuning for busy tables: - -```sql --- Increase frequency for high-churn tables -alter table orders set ( - autovacuum_vacuum_scale_factor = 0.05, -- Vacuum at 5% dead tuples (default 20%) - autovacuum_analyze_scale_factor = 0.02 -- Analyze at 2% changes (default 10%) -); - --- Check autovacuum status -select * from pg_stat_progress_vacuum; -``` - -Reference: [VACUUM](https://supabase.com/docs/guides/database/database-size#vacuum-operations) diff --git a/.continue/skills/supabase-postgres-best-practices/references/query-composite-indexes.md b/.continue/skills/supabase-postgres-best-practices/references/query-composite-indexes.md deleted file mode 100644 index fea64523..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/query-composite-indexes.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Create Composite Indexes for Multi-Column Queries -impact: HIGH -impactDescription: 5-10x faster multi-column queries -tags: indexes, composite-index, multi-column, query-optimization ---- - -## Create Composite Indexes for Multi-Column Queries - -When queries filter on multiple 
columns, a composite index is more efficient than separate single-column indexes. - -**Incorrect (separate indexes require bitmap scan):** - -```sql --- Two separate indexes -create index orders_status_idx on orders (status); -create index orders_created_idx on orders (created_at); - --- Query must combine both indexes (slower) -select * from orders where status = 'pending' and created_at > '2024-01-01'; -``` - -**Correct (composite index):** - -```sql --- Single composite index (leftmost column first for equality checks) -create index orders_status_created_idx on orders (status, created_at); - --- Query uses one efficient index scan -select * from orders where status = 'pending' and created_at > '2024-01-01'; -``` - -**Column order matters** - place equality columns first, range columns last: - -```sql --- Good: status (=) before created_at (>) -create index idx on orders (status, created_at); - --- Works for: WHERE status = 'pending' --- Works for: WHERE status = 'pending' AND created_at > '2024-01-01' --- Does NOT work for: WHERE created_at > '2024-01-01' (leftmost prefix rule) -``` - -Reference: [Multicolumn Indexes](https://www.postgresql.org/docs/current/indexes-multicolumn.html) diff --git a/.continue/skills/supabase-postgres-best-practices/references/query-covering-indexes.md b/.continue/skills/supabase-postgres-best-practices/references/query-covering-indexes.md deleted file mode 100644 index 9d2a4947..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/query-covering-indexes.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Use Covering Indexes to Avoid Table Lookups -impact: MEDIUM-HIGH -impactDescription: 2-5x faster queries by eliminating heap fetches -tags: indexes, covering-index, include, index-only-scan ---- - -## Use Covering Indexes to Avoid Table Lookups - -Covering indexes include all columns needed by a query, enabling index-only scans that skip the table entirely. - -**Incorrect (index scan + heap fetch):** - -```sql -create index users_email_idx on users (email); - --- Must fetch name and created_at from table heap -select email, name, created_at from users where email = 'user@example.com'; -``` - -**Correct (index-only scan with INCLUDE):** - -```sql --- Include non-searchable columns in the index -create index users_email_idx on users (email) include (name, created_at); - --- All columns served from index, no table access needed -select email, name, created_at from users where email = 'user@example.com'; -``` - -Use INCLUDE for columns you SELECT but don't filter on: - -```sql --- Searching by status, but also need customer_id and total -create index orders_status_idx on orders (status) include (customer_id, total); - -select status, customer_id, total from orders where status = 'shipped'; -``` - -Reference: [Index-Only Scans](https://www.postgresql.org/docs/current/indexes-index-only-scans.html) diff --git a/.continue/skills/supabase-postgres-best-practices/references/query-index-types.md b/.continue/skills/supabase-postgres-best-practices/references/query-index-types.md deleted file mode 100644 index 93b32590..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/query-index-types.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Choose the Right Index Type for Your Data -impact: HIGH -impactDescription: 10-100x improvement with correct index type -tags: indexes, btree, gin, gist, brin, hash, index-types ---- - -## Choose the Right Index Type for Your Data - -Different index types excel at different query patterns. 
The default B-tree isn't always optimal. - -**Incorrect (B-tree for JSONB containment):** - -```sql --- B-tree cannot optimize containment operators -create index products_attrs_idx on products (attributes); -select * from products where attributes @> '{"color": "red"}'; --- Full table scan - B-tree doesn't support @> operator -``` - -**Correct (GIN for JSONB):** - -```sql --- GIN supports @>, ?, ?&, ?| operators -create index products_attrs_idx on products using gin (attributes); -select * from products where attributes @> '{"color": "red"}'; -``` - -Index type guide: - -```sql --- B-tree (default): =, <, >, BETWEEN, IN, IS NULL -create index users_created_idx on users (created_at); - --- GIN: arrays, JSONB, full-text search -create index posts_tags_idx on posts using gin (tags); - --- GiST: geometric data, range types, nearest-neighbor (KNN) queries -create index locations_idx on places using gist (location); - --- BRIN: large time-series tables (10-100x smaller) -create index events_time_idx on events using brin (created_at); - --- Hash: equality-only (slightly faster than B-tree for =) -create index sessions_token_idx on sessions using hash (token); -``` - -Reference: [Index Types](https://www.postgresql.org/docs/current/indexes-types.html) diff --git a/.continue/skills/supabase-postgres-best-practices/references/query-missing-indexes.md b/.continue/skills/supabase-postgres-best-practices/references/query-missing-indexes.md deleted file mode 100644 index e6daace7..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/query-missing-indexes.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Add Indexes on WHERE and JOIN Columns -impact: CRITICAL -impactDescription: 100-1000x faster queries on large tables -tags: indexes, performance, sequential-scan, query-optimization ---- - -## Add Indexes on WHERE and JOIN Columns - -Queries filtering or joining on unindexed columns cause full table scans, which become exponentially slower as tables grow. 
- -**Incorrect (sequential scan on large table):** - -```sql --- No index on customer_id causes full table scan -select * from orders where customer_id = 123; - --- EXPLAIN shows: Seq Scan on orders (cost=0.00..25000.00 rows=100 width=85) -``` - -**Correct (index scan):** - -```sql --- Create index on frequently filtered column -create index orders_customer_id_idx on orders (customer_id); - -select * from orders where customer_id = 123; - --- EXPLAIN shows: Index Scan using orders_customer_id_idx (cost=0.42..8.44 rows=100 width=85) -``` - -For JOIN columns, always index the foreign key side: - -```sql --- Index the referencing column -create index orders_customer_id_idx on orders (customer_id); - -select c.name, o.total -from customers c -join orders o on o.customer_id = c.id; -``` - -Reference: [Query Optimization](https://supabase.com/docs/guides/database/query-optimization) diff --git a/.continue/skills/supabase-postgres-best-practices/references/query-partial-indexes.md b/.continue/skills/supabase-postgres-best-practices/references/query-partial-indexes.md deleted file mode 100644 index 3e61a341..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/query-partial-indexes.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Use Partial Indexes for Filtered Queries -impact: HIGH -impactDescription: 5-20x smaller indexes, faster writes and queries -tags: indexes, partial-index, query-optimization, storage ---- - -## Use Partial Indexes for Filtered Queries - -Partial indexes only include rows matching a WHERE condition, making them smaller and faster when queries consistently filter on the same condition. - -**Incorrect (full index includes irrelevant rows):** - -```sql --- Index includes all rows, even soft-deleted ones -create index users_email_idx on users (email); - --- Query always filters active users -select * from users where email = 'user@example.com' and deleted_at is null; -``` - -**Correct (partial index matches query filter):** - -```sql --- Index only includes active users -create index users_active_email_idx on users (email) -where deleted_at is null; - --- Query uses the smaller, faster index -select * from users where email = 'user@example.com' and deleted_at is null; -``` - -Common use cases for partial indexes: - -```sql --- Only pending orders (status rarely changes once completed) -create index orders_pending_idx on orders (created_at) -where status = 'pending'; - --- Only non-null values -create index products_sku_idx on products (sku) -where sku is not null; -``` - -Reference: [Partial Indexes](https://www.postgresql.org/docs/current/indexes-partial.html) diff --git a/.continue/skills/supabase-postgres-best-practices/references/schema-constraints.md b/.continue/skills/supabase-postgres-best-practices/references/schema-constraints.md deleted file mode 100644 index 1d2ef8f9..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/schema-constraints.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Add Constraints Safely in Migrations -impact: HIGH -impactDescription: Prevents migration failures and enables idempotent schema changes -tags: constraints, migrations, schema, alter-table ---- - -## Add Constraints Safely in Migrations - -PostgreSQL does not support `ADD CONSTRAINT IF NOT EXISTS`. Migrations using this syntax will fail. 
- -**Incorrect (causes syntax error):** - -```sql --- ERROR: syntax error at or near "not" (SQLSTATE 42601) -alter table public.profiles -add constraint if not exists profiles_birthchart_id_unique unique (birthchart_id); -``` - -**Correct (idempotent constraint creation):** - -```sql --- Use DO block to check before adding -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'profiles_birthchart_id_unique' - and conrelid = 'public.profiles'::regclass - ) then - alter table public.profiles - add constraint profiles_birthchart_id_unique unique (birthchart_id); - end if; -end $$; -``` - -For all constraint types: - -```sql --- Check constraints -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'check_age_positive' - ) then - alter table users add constraint check_age_positive check (age > 0); - end if; -end $$; - --- Foreign keys -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'profiles_birthchart_id_fkey' - ) then - alter table profiles - add constraint profiles_birthchart_id_fkey - foreign key (birthchart_id) references birthcharts(id); - end if; -end $$; -``` - -Check if constraint exists: - -```sql --- Query to check constraint existence -select conname, contype, pg_get_constraintdef(oid) -from pg_constraint -where conrelid = 'public.profiles'::regclass; - --- contype values: --- 'p' = PRIMARY KEY --- 'f' = FOREIGN KEY --- 'u' = UNIQUE --- 'c' = CHECK -``` - -Reference: [Constraints](https://www.postgresql.org/docs/current/ddl-constraints.html) diff --git a/.continue/skills/supabase-postgres-best-practices/references/schema-data-types.md b/.continue/skills/supabase-postgres-best-practices/references/schema-data-types.md deleted file mode 100644 index f253a581..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/schema-data-types.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Choose Appropriate Data Types -impact: HIGH -impactDescription: 50% storage reduction, faster comparisons -tags: data-types, schema, storage, performance ---- - -## Choose Appropriate Data Types - -Using the right data types reduces storage, improves query performance, and prevents bugs. 
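The storage side of this is easy to verify: `pg_column_size` reports the on-disk size of a single value, so candidate types can be compared directly. A quick illustrative sketch:

```sql
-- Per-value storage of common type choices
select pg_column_size(true)               as boolean_bytes,      -- 1 byte
       pg_column_size('true'::text)       as text_flag_bytes,    -- several bytes, varies with length
       pg_column_size(123::int)           as int_bytes,          -- 4 bytes
       pg_column_size(123::bigint)        as bigint_bytes,       -- 8 bytes
       pg_column_size(now()::timestamptz) as timestamptz_bytes;  -- 8 bytes
```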
- -**Incorrect (wrong data types):** - -```sql -create table users ( - id int, -- Will overflow at 2.1 billion - email varchar(255), -- Unnecessary length limit - created_at timestamp, -- Missing timezone info - is_active varchar(5), -- String for boolean - price varchar(20) -- String for numeric -); -``` - -**Correct (appropriate data types):** - -```sql -create table users ( - id bigint generated always as identity primary key, -- 9 quintillion max - email text, -- No artificial limit, same performance as varchar - created_at timestamptz, -- Always store timezone-aware timestamps - is_active boolean default true, -- 1 byte vs variable string length - price numeric(10,2) -- Exact decimal arithmetic -); -``` - -Key guidelines: - -```sql --- IDs: use bigint, not int (future-proofing) --- Strings: use text, not varchar(n) unless constraint needed --- Time: use timestamptz, not timestamp --- Money: use numeric, not float (precision matters) --- Enums: use text with check constraint or create enum type -``` - -Reference: [Data Types](https://www.postgresql.org/docs/current/datatype.html) diff --git a/.continue/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md b/.continue/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md deleted file mode 100644 index 6c3d6ff6..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Index Foreign Key Columns -impact: HIGH -impactDescription: 10-100x faster JOINs and CASCADE operations -tags: foreign-key, indexes, joins, schema ---- - -## Index Foreign Key Columns - -Postgres does not automatically index foreign key columns. Missing indexes cause slow JOINs and CASCADE operations. - -**Incorrect (unindexed foreign key):** - -```sql -create table orders ( - id bigint generated always as identity primary key, - customer_id bigint references customers(id) on delete cascade, - total numeric(10,2) -); - --- No index on customer_id! 
--- JOINs and ON DELETE CASCADE both require full table scan -select * from orders where customer_id = 123; -- Seq Scan -delete from customers where id = 123; -- Locks table, scans all orders -``` - -**Correct (indexed foreign key):** - -```sql -create table orders ( - id bigint generated always as identity primary key, - customer_id bigint references customers(id) on delete cascade, - total numeric(10,2) -); - --- Always index the FK column -create index orders_customer_id_idx on orders (customer_id); - --- Now JOINs and cascades are fast -select * from orders where customer_id = 123; -- Index Scan -delete from customers where id = 123; -- Uses index, fast cascade -``` - -Find missing FK indexes: - -```sql -select - conrelid::regclass as table_name, - a.attname as fk_column -from pg_constraint c -join pg_attribute a on a.attrelid = c.conrelid and a.attnum = any(c.conkey) -where c.contype = 'f' - and not exists ( - select 1 from pg_index i - where i.indrelid = c.conrelid and a.attnum = any(i.indkey) - ); -``` - -Reference: [Foreign Keys](https://www.postgresql.org/docs/current/ddl-constraints.html#DDL-CONSTRAINTS-FK) diff --git a/.continue/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md b/.continue/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md deleted file mode 100644 index f0072940..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Use Lowercase Identifiers for Compatibility -impact: MEDIUM -impactDescription: Avoid case-sensitivity bugs with tools, ORMs, and AI assistants -tags: naming, identifiers, case-sensitivity, schema, conventions ---- - -## Use Lowercase Identifiers for Compatibility - -PostgreSQL folds unquoted identifiers to lowercase. Quoted mixed-case identifiers require quotes forever and cause issues with tools, ORMs, and AI assistants that may not recognize them. 
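To audit an existing schema for identifiers that will need quoting, the system catalogs can be queried directly. A minimal sketch, assuming only ordinary tables in the `public` schema are of interest:

```sql
-- Tables and columns whose names are not already lowercase
select c.relname as table_name, a.attname as column_name
from pg_class c
join pg_namespace n on n.oid = c.relnamespace
join pg_attribute a on a.attrelid = c.oid
where n.nspname = 'public'
  and c.relkind = 'r'
  and a.attnum > 0
  and not a.attisdropped
  and (c.relname <> lower(c.relname) or a.attname <> lower(a.attname));
```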
- -**Incorrect (mixed-case identifiers):** - -```sql --- Quoted identifiers preserve case but require quotes everywhere -CREATE TABLE "Users" ( - "userId" bigint PRIMARY KEY, - "firstName" text, - "lastName" text -); - --- Must always quote or queries fail -SELECT "firstName" FROM "Users" WHERE "userId" = 1; - --- This fails - Users becomes users without quotes -SELECT firstName FROM Users; --- ERROR: relation "users" does not exist -``` - -**Correct (lowercase snake_case):** - -```sql --- Unquoted lowercase identifiers are portable and tool-friendly -CREATE TABLE users ( - user_id bigint PRIMARY KEY, - first_name text, - last_name text -); - --- Works without quotes, recognized by all tools -SELECT first_name FROM users WHERE user_id = 1; -``` - -Common sources of mixed-case identifiers: - -```sql --- ORMs often generate quoted camelCase - configure them to use snake_case --- Migrations from other databases may preserve original casing --- Some GUI tools quote identifiers by default - disable this - --- If stuck with mixed-case, create views as a compatibility layer -CREATE VIEW users AS SELECT "userId" AS user_id, "firstName" AS first_name FROM "Users"; -``` - -Reference: [Identifiers and Key Words](https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS) diff --git a/.continue/skills/supabase-postgres-best-practices/references/schema-partitioning.md b/.continue/skills/supabase-postgres-best-practices/references/schema-partitioning.md deleted file mode 100644 index 13137a03..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/schema-partitioning.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Partition Large Tables for Better Performance -impact: MEDIUM-HIGH -impactDescription: 5-20x faster queries and maintenance on large tables -tags: partitioning, large-tables, time-series, performance ---- - -## Partition Large Tables for Better Performance - -Partitioning splits a large table into smaller pieces, improving query performance and maintenance operations. 
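Two details worth planning alongside the range layout shown below: a `DEFAULT` partition catches rows that fall outside every declared range instead of failing the insert, and `EXPLAIN` confirms that partition pruning is actually happening. A sketch against the partitioned `events` table from the correct example:

```sql
-- Safety net for out-of-range rows
create table events_default partition of events default;

-- Plan should list only the partitions that can match the predicate
explain select * from events where created_at >= '2024-02-10';
```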
- -**Incorrect (single large table):** - -```sql -create table events ( - id bigint generated always as identity, - created_at timestamptz, - data jsonb -); - --- 500M rows, queries scan everything -select * from events where created_at > '2024-01-01'; -- Slow -vacuum events; -- Takes hours, locks table -``` - -**Correct (partitioned by time range):** - -```sql -create table events ( - id bigint generated always as identity, - created_at timestamptz not null, - data jsonb -) partition by range (created_at); - --- Create partitions for each month -create table events_2024_01 partition of events - for values from ('2024-01-01') to ('2024-02-01'); - -create table events_2024_02 partition of events - for values from ('2024-02-01') to ('2024-03-01'); - --- Queries only scan relevant partitions -select * from events where created_at > '2024-01-15'; -- Only scans events_2024_01+ - --- Drop old data instantly -drop table events_2023_01; -- Instant vs DELETE taking hours -``` - -When to partition: - -- Tables > 100M rows -- Time-series data with date-based queries -- Need to efficiently drop old data - -Reference: [Table Partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) diff --git a/.continue/skills/supabase-postgres-best-practices/references/schema-primary-keys.md b/.continue/skills/supabase-postgres-best-practices/references/schema-primary-keys.md deleted file mode 100644 index fb0fbb16..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/schema-primary-keys.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Select Optimal Primary Key Strategy -impact: HIGH -impactDescription: Better index locality, reduced fragmentation -tags: primary-key, identity, uuid, serial, schema ---- - -## Select Optimal Primary Key Strategy - -Primary key choice affects insert performance, index size, and replication -efficiency. 
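The index-size part of that trade-off can be measured directly on an existing database; sequential `bigint` keys generally produce much smaller primary-key indexes than random UUIDs. A sketch that lists every primary-key index with its on-disk size:

```sql
-- Primary-key indexes ordered by size
select c.relname as index_name,
       pg_size_pretty(pg_relation_size(c.oid)) as index_size
from pg_class c
join pg_index i on i.indexrelid = c.oid
where i.indisprimary
order by pg_relation_size(c.oid) desc;
```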
- -**Incorrect (problematic PK choices):** - -```sql --- identity is the SQL-standard approach -create table users ( - id serial primary key -- Works, but IDENTITY is recommended -); - --- Random UUIDs (v4) cause index fragmentation -create table orders ( - id uuid default gen_random_uuid() primary key -- UUIDv4 = random = scattered inserts -); -``` - -**Correct (optimal PK strategies):** - -```sql --- Use IDENTITY for sequential IDs (SQL-standard, best for most cases) -create table users ( - id bigint generated always as identity primary key -); - --- For distributed systems needing UUIDs, use UUIDv7 (time-ordered) --- Requires pg_uuidv7 extension: create extension pg_uuidv7; -create table orders ( - id uuid default uuid_generate_v7() primary key -- Time-ordered, no fragmentation -); - --- Alternative: time-prefixed IDs for sortable, distributed IDs (no extension needed) -create table events ( - id text default concat( - to_char(now() at time zone 'utc', 'YYYYMMDDHH24MISSMS'), - gen_random_uuid()::text - ) primary key -); -``` - -Guidelines: - -- Single database: `bigint identity` (sequential, 8 bytes, SQL-standard) -- Distributed/exposed IDs: UUIDv7 (requires pg_uuidv7) or ULID (time-ordered, no - fragmentation) -- `serial` works but `identity` is SQL-standard and preferred for new - applications -- Avoid random UUIDs (v4) as primary keys on large tables (causes index - fragmentation) - -Reference: -[Identity Columns](https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-PARMS-GENERATED-IDENTITY) diff --git a/.continue/skills/supabase-postgres-best-practices/references/security-privileges.md b/.continue/skills/supabase-postgres-best-practices/references/security-privileges.md deleted file mode 100644 index 448ec345..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/security-privileges.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Apply Principle of Least Privilege -impact: MEDIUM -impactDescription: Reduced attack surface, better audit trail -tags: privileges, security, roles, permissions ---- - -## Apply Principle of Least Privilege - -Grant only the minimum permissions required. Never use superuser for application queries. 
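Narrow grants tend to drift as new tables are added. `ALTER DEFAULT PRIVILEGES` keeps the policy applied to tables created later by the same role, so future migrations do not silently reopen access. A sketch using the roles from the correct example below:

```sql
-- Tables created later (by the role running this) inherit the same narrow grants
alter default privileges in schema public
  grant select on tables to app_readonly;

alter default privileges in schema public
  grant select, insert, update on tables to app_writer;
```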
- -**Incorrect (overly broad permissions):** - -```sql --- Application uses superuser connection --- Or grants ALL to application role -grant all privileges on all tables in schema public to app_user; -grant all privileges on all sequences in schema public to app_user; - --- Any SQL injection becomes catastrophic --- drop table users; cascades to everything -``` - -**Correct (minimal, specific grants):** - -```sql --- Create role with no default privileges -create role app_readonly nologin; - --- Grant only SELECT on specific tables -grant usage on schema public to app_readonly; -grant select on public.products, public.categories to app_readonly; - --- Create role for writes with limited scope -create role app_writer nologin; -grant usage on schema public to app_writer; -grant select, insert, update on public.orders to app_writer; -grant usage on sequence orders_id_seq to app_writer; --- No DELETE permission - --- Login role inherits from these -create role app_user login password 'xxx'; -grant app_writer to app_user; -``` - -Revoke public defaults: - -```sql --- Revoke default public access -revoke all on schema public from public; -revoke all on all tables in schema public from public; -``` - -Reference: [Roles and Privileges](https://supabase.com/blog/postgres-roles-and-privileges) diff --git a/.continue/skills/supabase-postgres-best-practices/references/security-rls-basics.md b/.continue/skills/supabase-postgres-best-practices/references/security-rls-basics.md deleted file mode 100644 index c61e1a85..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/security-rls-basics.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Enable Row Level Security for Multi-Tenant Data -impact: CRITICAL -impactDescription: Database-enforced tenant isolation, prevent data leaks -tags: rls, row-level-security, multi-tenant, security ---- - -## Enable Row Level Security for Multi-Tenant Data - -Row Level Security (RLS) enforces data access at the database level, ensuring users only see their own data. - -**Incorrect (application-level filtering only):** - -```sql --- Relying only on application to filter -select * from orders where user_id = $current_user_id; - --- Bug or bypass means all data is exposed! 
-select * from orders; -- Returns ALL orders -``` - -**Correct (database-enforced RLS):** - -```sql --- Enable RLS on the table -alter table orders enable row level security; - --- Create policy for users to see only their orders -create policy orders_user_policy on orders - for all - using (user_id = current_setting('app.current_user_id')::bigint); - --- Force RLS even for table owners -alter table orders force row level security; - --- Set user context and query -set app.current_user_id = '123'; -select * from orders; -- Only returns orders for user 123 -``` - -Policy for authenticated role: - -```sql -create policy orders_user_policy on orders - for all - to authenticated - using (user_id = auth.uid()); -``` - -Reference: [Row Level Security](https://supabase.com/docs/guides/database/postgres/row-level-security) diff --git a/.continue/skills/supabase-postgres-best-practices/references/security-rls-performance.md b/.continue/skills/supabase-postgres-best-practices/references/security-rls-performance.md deleted file mode 100644 index b32d92f7..00000000 --- a/.continue/skills/supabase-postgres-best-practices/references/security-rls-performance.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Optimize RLS Policies for Performance -impact: HIGH -impactDescription: 5-10x faster RLS queries with proper patterns -tags: rls, performance, security, optimization ---- - -## Optimize RLS Policies for Performance - -Poorly written RLS policies can cause severe performance issues. Use subqueries and indexes strategically. - -**Incorrect (function called for every row):** - -```sql -create policy orders_policy on orders - using (auth.uid() = user_id); -- auth.uid() called per row! - --- With 1M rows, auth.uid() is called 1M times -``` - -**Correct (wrap functions in SELECT):** - -```sql -create policy orders_policy on orders - using ((select auth.uid()) = user_id); -- Called once, cached - --- 100x+ faster on large tables -``` - -Use security definer functions for complex checks: - -```sql --- Create helper function (runs as definer, bypasses RLS) -create or replace function is_team_member(team_id bigint) -returns boolean -language sql -security definer -set search_path = '' -as $$ - select exists ( - select 1 from public.team_members - where team_id = $1 and user_id = (select auth.uid()) - ); -$$; - --- Use in policy (indexed lookup, not per-row check) -create policy team_orders_policy on orders - using ((select is_team_member(team_id))); -``` - -Always add indexes on columns used in RLS policies: - -```sql -create index orders_user_id_idx on orders (user_id); -``` - -Reference: [RLS Performance](https://supabase.com/docs/guides/database/postgres/row-level-security#rls-performance-recommendations) diff --git a/.env.vercel.production.template b/.env.vercel.production.template deleted file mode 100644 index 8f9cad1d..00000000 --- a/.env.vercel.production.template +++ /dev/null @@ -1,220 +0,0 @@ -# TradeHax production environment template for Vercel -# Copy values into Vercel -> Project -> Settings -> Environment Variables (Production) - -# ============================================ -# Core environment -# ============================================ -NODE_ENV=production - -# ============================================ -# Domains -# ============================================ -NEXT_PUBLIC_SITE_URL=https://tradehax.net -NEXT_PUBLIC_SITE_URL_ALT=https://tradehaxai.tech -NEXTAUTH_URL=https://tradehax.net - -# ============================================ -# Auth / identity -# 
============================================ -NEXTAUTH_SECRET=replace_with_32_plus_char_secret -JWT_SECRET=replace_with_32_plus_char_secret - -# Optional OAuth providers -DISCORD_CLIENT_ID=1450053974018494515 -DISCORD_CLIENT_SECRET= -DISCORD_REDIRECT_URI=https://tradehax.net/auth/discord/callback -DISCORD_BOT_TOKEN= -DISCORD_GUILD_ID= -DISCORD_APPLICATION_ID=1450053974018494515 -DISCORD_PUBLIC_KEY=af33c2c6795e6ea3616748fc160bde9096844f2fc78cdde07035cf35633c4267 -DISCORD_INTERACTIONS_ENDPOINT_URL=https://tradehax.net/api/interactions -DISCORD_LINKED_ROLES_VERIFICATION_URL=https://tradehax.net -DISCORD_OAUTH_AUTHORIZE_URL=https://discord.com/oauth2/authorize?client_id=1450053974018494515 -GOOGLE_CLIENT_ID= -GOOGLE_CLIENT_SECRET= -FACEBOOK_CLIENT_ID= -FACEBOOK_CLIENT_SECRET= - -# ============================================ -# AI - Hugging Face (server-only) -# ============================================ -HF_API_TOKEN=hf_replace_with_real_token -GROQ_API_KEY=replace_with_real_groq_token -HF_MODEL_ID=mistralai/Mistral-7B-Instruct-v0.1 -HF_IMAGE_MODEL_ID=stabilityai/stable-diffusion-2-1 -HF_USE_LOCAL_MODEL=false -LLM_TEMPERATURE=0.85 -LLM_MAX_LENGTH=768 -LLM_TOP_P=0.95 -HF_IMAGE_STEPS=30 -HF_IMAGE_GUIDANCE_SCALE=6.5 -HF_IMAGE_NEGATIVE_PROMPT_DEFAULT=blurry, low quality, watermark, logo, text overlay, disfigured -TRADEHAX_LLM_OPEN_MODE=true -TRADEHAX_IMAGE_OPEN_MODE=true -TRADEHAX_SUPERUSER_CODE=replace_this_superuser_code -TRADEHAX_DATA_HASH_SALT=replace_this_hash_salt -TRADEHAX_BEHAVIOR_MAX_RECORDS=5000 -TRADEHAX_DEFAULT_TRAINING_CONSENT=false -TRADEHAX_ALLOW_IMPLICIT_ANALYTICS_CONSENT=false -TRADEHAX_AI_TRAINING_STORAGE=supabase -TRADEHAX_SUPABASE_AI_BENCHMARKS_TABLE=ai_training_benchmarks -TRADEHAX_SUPABASE_AI_PERSONALIZATION_TABLE=ai_trading_personalization_profiles -TRADEHAX_SUPABASE_AI_TRADE_OUTCOMES_TABLE=ai_trading_trade_outcomes -TRADEHAX_HIVEMIND_LEGACY_ACCOUNT_ID=legacy-root -TRADEHAX_HIVEMIND_DATA_RETENTION_DAYS=36500 -TRADEHAX_HIVEMIND_MIN_TRAINING_ROWS=500 -TRADEHAX_HIVEMIND_BENCHMARK_MIN_SCORE=0.62 -TRADEHAX_HIVEMIND_BENCHMARK_MIN_DELTA=-0.03 -TRADEHAX_HIVEMIND_BENCHMARK_WINDOW_SIZE=10 -TRADEHAX_HIVEMIND_LINEAGE_SECRET= -TRADEHAX_HIVEMIND_MEMORY_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 - -# ============================================ -# Trading / data feeds -# ============================================ -BLOOMBERG_API_KEY= -BPIPE_TOKEN= -UNUSUALWHALES_API_KEY= -POLYGON_API_KEY= -FINNHUB_API_KEY= -INTELLIGENCE_DATA_PROVIDER=vendor -INTELLIGENCE_VENDOR_NAME=unusualwhales -TRADEHAX_INTELLIGENCE_PROVIDER= -TRADEHAX_INTELLIGENCE_CACHE_MS=15000 -UNUSUALWHALES_BASE_URL=https://api.unusualwhales.com -UNUSUALWHALES_FLOW_ENDPOINT= -UNUSUALWHALES_DARK_POOL_ENDPOINT= -UNUSUALWHALES_NEWS_ENDPOINT= -POLYGON_BASE_URL=https://api.polygon.io -BLOOMBERG_PROXY_BASE_URL= -BLOOMBERG_FLOW_ENDPOINT= -BLOOMBERG_DARK_POOL_ENDPOINT= -BLOOMBERG_NEWS_ENDPOINT= -TRADEHAX_INTELLIGENCE_WS_ENABLED=false -TRADEHAX_INTELLIGENCE_WS_URL= -TRADEHAX_INTELLIGENCE_WS_PROTOCOL= -TRADEHAX_INTELLIGENCE_WS_RECONNECT_MS=4000 - -# ============================================ -# Solana / Web3 -# ============================================ -NEXT_PUBLIC_SOLANA_NETWORK=mainnet-beta -NEXT_PUBLIC_SOLANA_RPC=https://api.mainnet-beta.solana.com -NEXT_PUBLIC_HELIUS_API_KEY= -NEXT_PUBLIC_CLAIM_API_BASE=https://tradehax.net/api/claim - -# ============================================ -# Contact + booking -# ============================================ -NEXT_PUBLIC_CONTACT_EMAIL=darkmodder33@proton.me 
-NEXT_PUBLIC_CONTACT_PHONE_E164=+18563208570 -NEXT_PUBLIC_CONTACT_PHONE_DISPLAY=(856) 320-8570 -NEXT_PUBLIC_EMERGENCY_PHONE_E164=+16094128878 -NEXT_PUBLIC_EMERGENCY_PHONE_DISPLAY=(609) 412-8878 -NEXT_PUBLIC_CASHAPP_TAG=$irishLivesMatter -NEXT_PUBLIC_SUPPORT_MESSAGE=You can support our Work CashApp $irishLivesMatter or https://buymeacoffee.com/hackavelli -NEXT_PUBLIC_BUYMEACOFFEE_URL=https://buymeacoffee.com/hackavelli -NEXT_PUBLIC_EMERGENCY_UNLOCK_DONATION_USD=5 -NEXT_PUBLIC_TEXT_PREFILL=Hi TradeHax AI, I want to schedule a service. -NEXT_PUBLIC_GOOGLE_CALENDAR_EMBED_URL= -NEXT_PUBLIC_GOOGLE_MEET_BOOKING_URL=https://calendar.app.google/hhBXuJjfaApoXVzc6 -NEXT_PUBLIC_GOOGLE_CALENDAR_OPEN_URL= -NEXT_PUBLIC_BOOKING_PRIMARY_URL= -NEXT_PUBLIC_BOOKING_GUITAR_LESSONS= -NEXT_PUBLIC_BOOKING_WEB3_CONSULT= -NEXT_PUBLIC_BOOKING_TRADING_CONSULT= -NEXT_PUBLIC_BOOKING_TECH_SUPPORT= -NEXT_PUBLIC_BOOKING_SOCIAL_MEDIA_CONSULT= -NEXT_PUBLIC_BOOKING_IT_MANAGEMENT= -NEXT_PUBLIC_BOOKING_APP_DEVELOPMENT= -NEXT_PUBLIC_BOOKING_DATABASE_CONSULT= -NEXT_PUBLIC_BOOKING_ECOMMERCE_CONSULT= - -# ============================================ -# Social -# ============================================ -NEXT_PUBLIC_SOCIAL_X_URL=https://x.com/tradehaxai -NEXT_PUBLIC_SOCIAL_YOUTUBE_URL=https://www.youtube.com/@tradehaxnet -NEXT_PUBLIC_SOCIAL_GITHUB_URL=https://github.com/your-org/your-repo -NEXT_PUBLIC_SOCIAL_FACEBOOK_URL= -NEXT_PUBLIC_SOCIAL_INSTAGRAM_URL= - -# ============================================ -# Analytics -# ============================================ -NEXT_PUBLIC_GA_MEASUREMENT_ID=G-XXXXXXXXXX -NEXT_PUBLIC_GOOGLE_ANALYTICS_ID= -NEXT_PUBLIC_ENABLE_ANALYTICS=true - -# ============================================ -# Affiliate links -# ============================================ -NEXT_PUBLIC_COINBASE_REF= -NEXT_PUBLIC_BINANCE_REF= -NEXT_PUBLIC_PHANTOM_REF= -NEXT_PUBLIC_LEDGER_REF= -NEXT_PUBLIC_TRADINGVIEW_REF= -NEXT_PUBLIC_COINGECKO_REF= -NEXT_PUBLIC_ALCHEMY_REF= - -# ============================================ -# Payments / monetization -# ============================================ -NEXT_PUBLIC_ENABLE_PAYMENTS=false -NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY= -STRIPE_SECRET_KEY= -STRIPE_WEBHOOK_SECRET= -TRADEHAX_STRIPE_CHECKOUT_URL= -TRADEHAX_COINBASE_CHECKOUT_URL= -TRADEHAX_ALLOW_PAYMENT_SIMULATION=false -TRADEHAX_WEBHOOK_SECRET=replace_this_secret -TRADEHAX_ADMIN_KEY=replace_this_admin_key -AI_SERVER_API_KEY=replace_with_strong_ai_server_key -VERCEL_AUTOMATION_BYPASS_SECRET=otWDNt0dMxhdDDdkNEPwodop676ofPP1 - -# ============================================ -# Intelligence Phase 3 persistence -# ============================================ -TRADEHAX_INTELLIGENCE_STORAGE=supabase -SUPABASE_URL= -SUPABASE_SERVICE_ROLE_KEY= -TRADEHAX_SUPABASE_WATCHLIST_TABLE=tradehax_watchlist_items -TRADEHAX_SUPABASE_ALERTS_TABLE=tradehax_intelligence_alerts - -# ============================================ -# Discord alert routing + threads -# ============================================ -TRADEHAX_DISCORD_WEBHOOK= -TRADEHAX_DISCORD_WEBHOOK_FREE= -TRADEHAX_DISCORD_WEBHOOK_BASIC= -TRADEHAX_DISCORD_WEBHOOK_PRO= -TRADEHAX_DISCORD_WEBHOOK_ELITE= -TRADEHAX_DISCORD_CHANNEL_FREE=intel-community -TRADEHAX_DISCORD_CHANNEL_BASIC=intel-basic -TRADEHAX_DISCORD_CHANNEL_PRO=intel-pro -TRADEHAX_DISCORD_CHANNEL_ELITE=intel-elite -TRADEHAX_DISCORD_DEFAULT_THREAD_ID=1421509686443905094 -TRADEHAX_DISCORD_THREAD_OPTIONS_FLOW= -TRADEHAX_DISCORD_THREAD_DARK_POOL= -TRADEHAX_DISCORD_THREAD_CRYPTO_FLOW= -TRADEHAX_DISCORD_THREAD_CATALYST_NEWS= 
-TRADEHAX_DISCORD_THREAD_URGENT= -TRADEHAX_DISCORD_THREAD_WATCH= -TRADEHAX_DISCORD_THREAD_INFO= -TRADEHAX_DISCORD_SERVER_ID=1421509686443905094 - -# ============================================ -# Vercel project metadata -# ============================================ -VERCEL_SCOPE=your-vercel-scope -VERCEL_TEAM_SLUG=your-vercel-team -VERCEL_TEAM_ID=team_Axs3glaY6k3cT2zJb8H3DZ9c -VERCEL_ORG_ID=team_Axs3glaY6k3cT2zJb8H3DZ9c -VERCEL_PROJECT_ID=prj_LDmkGrAq06c1DJcH98BeN6GYhZpW - -# ============================================ -# Feature toggles -# ============================================ -NEXT_PUBLIC_ENABLE_WEB3_AUTH=true -NEXT_PUBLIC_ENABLE_SOCIAL_AUTH=false -ENABLE_RATE_LIMITING=true diff --git a/.github/workflows/aggressive-proof-gate.yml b/.github/workflows/aggressive-proof-gate.yml deleted file mode 100644 index 79d817e8..00000000 --- a/.github/workflows/aggressive-proof-gate.yml +++ /dev/null @@ -1,104 +0,0 @@ -name: Aggressive Proof Gate - -on: - schedule: - - cron: "12 6 * * *" - workflow_dispatch: - inputs: - strict: - description: "Run strict aggressive mode (fail-fast)" - required: false - default: "false" - minScore: - description: "Minimum excellence score threshold (0-100)" - required: false - default: "95" - push: - branches: [main] - paths: - - "scripts/aggressive-dev-loop.js" - - "scripts/verify-aggressive-dev-report.js" - - "package.json" - - ".github/workflows/aggressive-proof-gate.yml" - -jobs: - proof-gate: - runs-on: ubuntu-latest - timeout-minutes: 60 - permissions: - contents: read - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup Node - uses: actions/setup-node@v4 - with: - node-version: "20" - cache: "npm" - - - name: Install dependencies - run: npm ci - - - name: Resolve gate settings - id: settings - shell: bash - run: | - STRICT_INPUT="${{ github.event.inputs.strict || 'false' }}" - SCORE_INPUT="${{ github.event.inputs.minScore || '95' }}" - if [ "${{ github.event_name }}" = "schedule" ]; then - STRICT_INPUT="false" - SCORE_INPUT="95" - fi - echo "strict=$STRICT_INPUT" >> "$GITHUB_OUTPUT" - echo "minScore=$SCORE_INPUT" >> "$GITHUB_OUTPUT" - - - name: Run aggressive loop (standard) - if: ${{ steps.settings.outputs.strict != 'true' }} - run: npm run dev:aggressive:quick - - - name: Run aggressive loop (strict) - if: ${{ steps.settings.outputs.strict == 'true' }} - run: npm run dev:aggressive:quick:strict - - - name: Verify report integrity + excellence threshold - run: npm run dev:aggressive:verify -- --min-score ${{ steps.settings.outputs.minScore }} - - - name: Publish proof summary - if: always() - shell: bash - run: | - node -e " - const fs = require('fs'); - const p = '.artifacts/aggressive-dev-report.json'; - if (!fs.existsSync(p)) { - process.stdout.write('## Aggressive Proof Summary\\n\\nNo report generated.\\n'); - process.exit(0); - } - const r = JSON.parse(fs.readFileSync(p, 'utf8')); - const lines = [ - '## Aggressive Proof Summary', - '', - `- Success: ${r.execution?.success ? 'yes' : 'no'}`, - `- Mode: ${r.metadata?.mode}`, - `- Strict: ${r.metadata?.strict ? 
'yes' : 'no'}`, - `- Coverage: ${r.summary?.coveragePct}%`, - `- Pass rate: ${r.summary?.passRatePct}%`, - `- Passed steps: ${r.summary?.passed}`, - `- Failed steps: ${r.summary?.failed}`, - `- Integrity hash: ${r.integrity?.hash}`, - ]; - process.stdout.write(lines.join('\\n') + '\\n'); - " >> "$GITHUB_STEP_SUMMARY" - - - name: Upload aggressive proof artifacts - if: always() - uses: actions/upload-artifact@v4 - with: - name: aggressive-proof-artifacts - retention-days: 14 - path: | - .artifacts/aggressive-dev-report.json - .artifacts/aggressive-dev-report.sha256 - .artifacts/aggressive-dev-plan.json \ No newline at end of file diff --git a/.github/workflows/ai-micro-ci.yml b/.github/workflows/ai-micro-ci.yml deleted file mode 100644 index 6a445663..00000000 --- a/.github/workflows/ai-micro-ci.yml +++ /dev/null @@ -1,37 +0,0 @@ -name: AI Microservice CI - -on: - push: - paths: - - "ai-micro/**" - - ".github/workflows/ai-micro-ci.yml" - pull_request: - paths: - - "ai-micro/**" - -jobs: - test-build: - runs-on: ubuntu-latest - defaults: - run: - working-directory: ai-micro - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup Node - uses: actions/setup-node@v4 - with: - node-version: "20" - cache: npm - cache-dependency-path: ai-micro/package-lock.json - - - name: Install deps - run: npm ci - - - name: Build - run: npm run build - - - name: Docker build - run: docker build -t tradehax/ai-micro:ci . diff --git a/.github/workflows/ethicalcheck.yml b/.github/workflows/ethicalcheck.yml deleted file mode 100644 index 60b3d09c..00000000 --- a/.github/workflows/ethicalcheck.yml +++ /dev/null @@ -1,69 +0,0 @@ -# This workflow uses actions that are not certified by GitHub. -# They are provided by a third-party and are governed by -# separate terms of service, privacy policy, and support -# documentation. - -# EthicalCheck addresses the critical need to continuously security test APIs in development and in production. - -# EthicalCheck provides the industry’s only free & automated API security testing service that uncovers security vulnerabilities using OWASP API list. -# Developers relies on EthicalCheck to evaluate every update and release, ensuring that no APIs go to production with exploitable vulnerabilities. - -# You develop the application and API, we bring complete and continuous security testing to you, accelerating development. - -# Know your API and Applications are secure with EthicalCheck – our free & automated API security testing service. - -# How EthicalCheck works? -# EthicalCheck functions in the following simple steps. -# 1. Security Testing. -# Provide your OpenAPI specification or start with a public Postman collection URL. -# EthicalCheck instantly instrospects your API and creates a map of API endpoints for security testing. -# It then automatically creates hundreds of security tests that are non-intrusive to comprehensively and completely test for authentication, authorizations, and OWASP bugs your API. The tests addresses the OWASP API Security categories including OAuth 2.0, JWT, Rate Limit etc. - -# 2. Reporting. -# EthicalCheck generates security test report that includes all the tested endpoints, coverage graph, exceptions, and vulnerabilities. -# Vulnerabilities are fully triaged, it contains CVSS score, severity, endpoint information, and OWASP tagging. 
- - -# This is a starter workflow to help you get started with EthicalCheck Actions - -name: EthicalCheck-Workflow - -# Controls when the workflow will run -on: - # Triggers the workflow on push or pull request events but only for the "main" branch - # Customize trigger events based on your DevSecOps processes. - push: - branches: [ "main" ] - pull_request: - branches: [ "main" ] - schedule: - - cron: '24 19 * * 6' - - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -permissions: - contents: read - -jobs: - Trigger_EthicalCheck: - permissions: - security-events: write # for github/codeql-action/upload-sarif to upload SARIF results - actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status - runs-on: ubuntu-latest - - steps: - - name: EthicalCheck Free & Automated API Security Testing Service - uses: apisec-inc/ethicalcheck-action@005fac321dd843682b1af6b72f30caaf9952c641 - with: - # The OpenAPI Specification URL or Swagger Path or Public Postman collection URL. - oas-url: "http://netbanking.apisec.ai:8080/v2/api-docs" - # The email address to which the penetration test report will be sent. - email: "xxx@apisec.ai" - sarif-result-file: "ethicalcheck-results.sarif" - - - name: Upload sarif file to repository - uses: github/codeql-action/upload-sarif@v3 - with: - sarif_file: ./ethicalcheck-results.sarif - diff --git a/.github/workflows/github-pages.yml b/.github/workflows/github-pages.yml deleted file mode 100644 index 8e73aacf..00000000 --- a/.github/workflows/github-pages.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: Deploy to GitHub Pages - -on: - workflow_dispatch: - -permissions: - contents: write - pages: write - id-token: write - -jobs: - build-and-deploy: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Setup Node.js - uses: actions/setup-node@v4 - with: - node-version: "20" - cache: "npm" - - - name: Install dependencies - run: npm ci - - - name: Build static site - run: npm run build - env: - NEXT_PUBLIC_SITE_URL: https://tradehaxai.tech - NEXT_PUBLIC_SOLANA_NETWORK: mainnet-beta - NEXT_PUBLIC_SOLANA_RPC: https://api.mainnet-beta.solana.com - - - name: Add CNAME file - run: echo "tradehaxai.tech" > ./out/CNAME - - - name: Add .nojekyll file - run: touch ./out/.nojekyll - - - name: Deploy to GitHub Pages - uses: peaceiris/actions-gh-pages@v4 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: ./out - publish_branch: gh-pages - force_orphan: false - cname: tradehaxai.tech diff --git a/.github/workflows/hivemind-quality-gate.yml b/.github/workflows/hivemind-quality-gate.yml deleted file mode 100644 index 87214eb2..00000000 --- a/.github/workflows/hivemind-quality-gate.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: Hivemind Quality Gate - -on: - schedule: - - cron: "17 5 * * *" - workflow_dispatch: - inputs: - strict: - description: "Run strict quality gate (fail on readiness/benchmark issues)" - required: false - default: "false" - -jobs: - hivemind-quality: - runs-on: ubuntu-latest - permissions: - contents: read - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup Node - uses: actions/setup-node@v4 - with: - node-version: "20" - cache: "npm" - - - name: Install dependencies - run: npm ci - - - name: Ensure evaluation assets exist - run: | - npm run tradebot:generate-eval-suite - npm run tradebot:init-eval-responses - npm run tradebot:score-eval - - - name: Hivemind readiness (informational) - if: ${{ 
github.event.inputs.strict != 'true' }} - run: npm run hivemind:doctor - - - name: Benchmark trend check (informational) - if: ${{ github.event.inputs.strict != 'true' }} - run: npm run hivemind:benchmark - - - name: Strict quality gate - if: ${{ github.event.inputs.strict == 'true' }} - run: npm run hivemind:quality:strict - - - name: Upload benchmark artifacts - if: always() - uses: actions/upload-artifact@v4 - with: - name: hivemind-quality-artifacts - path: | - data/tradebot/eval-score.json - data/tradebot/eval-history.jsonl diff --git a/.github/workflows/install-hooks-test.yml b/.github/workflows/install-hooks-test.yml deleted file mode 100644 index 2ad63141..00000000 --- a/.github/workflows/install-hooks-test.yml +++ /dev/null @@ -1,54 +0,0 @@ -name: Install Hooks Smoke Test - -on: - push: - branches: [ main ] - pull_request: - workflow_dispatch: - -jobs: - test-ubuntu: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Setup Node - uses: actions/setup-node@v4 - with: - node-version: '18' - - name: Make scripts executable - run: | - chmod +x ./scripts/install-hooks.sh || true - - name: Run Node wrapper - run: | - node ./scripts/install-hooks.js - - name: Verify hooksPath - run: | - CURRENT=$(git config --local --get core.hooksPath || true) - echo "core.hooksPath=$CURRENT" - if [ "$CURRENT" != ".githooks" ]; then - echo "core.hooksPath not set to .githooks" - exit 1 - fi - - test-windows: - runs-on: windows-latest - steps: - - uses: actions/checkout@v4 - - name: Setup Node - uses: actions/setup-node@v4 - with: - node-version: '18' - - name: Run PowerShell installer (or Node wrapper) - shell: pwsh - run: | - if (Test-Path -Path ./scripts/install-hooks.ps1) { - pwsh -NoProfile -NonInteractive -ExecutionPolicy Bypass -File .\scripts\install-hooks.ps1 - } else { - node .\scripts\install-hooks.js - } - - name: Verify hooksPath - shell: pwsh - run: | - $current = git config --local --get core.hooksPath - Write-Host "core.hooksPath=$current" - if ($current -ne '.githooks') { throw "core.hooksPath not set to .githooks" } diff --git a/.github/workflows/intelligence-ingest-quality.yml b/.github/workflows/intelligence-ingest-quality.yml deleted file mode 100644 index f409a562..00000000 --- a/.github/workflows/intelligence-ingest-quality.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: Intelligence Ingest Quality - -on: - schedule: - - cron: "23 */8 * * *" - workflow_dispatch: - -jobs: - ingest-quality: - runs-on: ubuntu-latest - permissions: - contents: read - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Run remote intelligence ingest and capture report - env: - TRADEHAX_CRON_SECRET: ${{ secrets.TRADEHAX_CRON_SECRET }} - run: | - mkdir -p .artifacts - - if [ -z "$TRADEHAX_CRON_SECRET" ]; then - echo '{"ok":false,"skipped":true,"reason":"Missing TRADEHAX_CRON_SECRET in repository secrets."}' > .artifacts/intelligence-ingest-report.json - exit 0 - fi - - curl --silent --show-error --fail \ - -H "Authorization: Bearer $TRADEHAX_CRON_SECRET" \ - "https://tradehax.net/api/cron/ai/intelligence-ingest" \ - > .artifacts/intelligence-ingest-report.json - - - name: Append run summary - run: | - node -e "const fs=require('node:fs'); const p='.artifacts/intelligence-ingest-report.json'; const out=process.env.GITHUB_STEP_SUMMARY; let data={ok:false,error:'missing_report'}; try{data=JSON.parse(fs.readFileSync(p,'utf8'));}catch{} const lines=[]; lines.push('## 🧠 Intelligence Ingest Report'); lines.push(''); lines.push('- Status: '+(data.ok?'βœ… OK':'⚠️ Check')); 
if(data.generatedAt) lines.push('- Generated: '+data.generatedAt); if(data.mode) lines.push('- Mode: '+data.mode); if(data.reason) lines.push('- Reason: '+data.reason); if(data.totals){ lines.push('- Discovered: '+(data.totals.discovered ?? 'n/a')); lines.push('- Accepted by quality: '+(data.totals.acceptedByQuality ?? 'n/a')); lines.push('- Queued: '+(data.totals.queued ?? 'n/a')); lines.push('- Upserted: '+(data.totals.upserted ?? 'n/a')); lines.push('- Skipped unchanged: '+(data.totals.skippedUnchanged ?? 'n/a')); lines.push('- Skipped low quality: '+(data.totals.skippedLowQuality ?? 'n/a')); lines.push('- Skipped budgeted: '+(data.totals.skippedBudgeted ?? 'n/a')); lines.push('- Skipped errored: '+(data.totals.skippedErrored ?? 'n/a')); } if(data.quality){ lines.push('- Min quality score: '+(data.quality.minimumScore ?? 'n/a')); lines.push('- Avg accepted score: '+(data.quality.averageAcceptedScore ?? 'n/a')); } if(data.cost){ lines.push('- Max docs/run: '+(data.cost.maxTotalDocs ?? 'n/a')); lines.push('- Max embed calls/run: '+(data.cost.maxEmbedCalls ?? 'n/a')); lines.push('- Embed calls used: '+(data.cost.embedCallsUsed ?? 'n/a')); } fs.appendFileSync(out, lines.join('\n')+'\n');" - - - name: Evaluate rollback and alert conditions - run: | - node -e "const fs=require('node:fs'); const p='.artifacts/intelligence-ingest-report.json'; let data={ok:false,error:'missing_report'}; try{data=JSON.parse(fs.readFileSync(p,'utf8'));}catch{} if(data.skipped){ console.log('Skipping alert gate for skipped run.'); process.exit(0);} if(data.ok===false){ console.error('Ingest report indicates failure.'); process.exit(1);} const triggered=Boolean(data?.rollback?.triggered || data?.rollback?.triggeredThisRun); if(triggered){ const reasons=Array.isArray(data?.rollback?.reasons)&&data.rollback.reasons.length?data.rollback.reasons.join('; '):'unspecified degradation'; console.error('Rollback triggered: '+reasons); process.exit(1);} console.log('Rollback guardrail check passed.');" - - - name: Upload ingest artifacts - if: always() - uses: actions/upload-artifact@v4 - with: - name: intelligence-ingest-report - path: | - .artifacts/intelligence-ingest-report.json diff --git a/.github/workflows/lighthouse-ci.yml b/.github/workflows/lighthouse-ci.yml deleted file mode 100644 index fd902c12..00000000 --- a/.github/workflows/lighthouse-ci.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: Lighthouse CI - -on: - push: - branches: [main] - pull_request: - branches: [main] - -permissions: - contents: read - -jobs: - lighthouse: - name: Lighthouse Performance Audit - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Setup Node.js - uses: actions/setup-node@v4 - with: - node-version: 20 - cache: npm - - - name: Install dependencies - run: npm ci - - - name: Build Next.js app - run: npm run build - env: - NEXT_FORCE_STATIC_EXPORT: "0" - - - name: Start production server - run: npm start & - env: - PORT: 3000 - - - name: Wait for server to be ready - run: npx wait-on http://localhost:3000 --timeout 60000 - - - name: Run Lighthouse CI - uses: treosh/lighthouse-ci-action@v12 - with: - urls: | - http://localhost:3000/ - budgetPath: ./.lighthouserc.json - uploadArtifacts: true - temporaryPublicStorage: true diff --git a/.github/workflows/live-delta-dataset-refresh.yml b/.github/workflows/live-delta-dataset-refresh.yml deleted file mode 100644 index e64ca110..00000000 --- a/.github/workflows/live-delta-dataset-refresh.yml +++ /dev/null @@ -1,63 +0,0 @@ -name: Live Delta Dataset 
Refresh - -on: - schedule: - - cron: "17 */6 * * *" - workflow_dispatch: - -jobs: - refresh-dataset-deltas: - runs-on: ubuntu-latest - permissions: - contents: write - - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Setup Node - uses: actions/setup-node@v4 - with: - node-version: 20 - cache: npm - - - name: Install dependencies - run: npm ci - - - name: Ingest live training deltas - env: - FINNHUB_API_KEY: ${{ secrets.FINNHUB_API_KEY }} - TRADEHAX_LIVE_SYMBOLS: BTC,ETH,SOL,SPY,QQQ,TSLA,NVDA - TRADEHAX_LIVE_MAX_SYMBOLS: "12" - TRADEHAX_LIVE_INGEST_RETRIES: "2" - TRADEHAX_LIVE_INGEST_TIMEOUT_MS: "9000" - run: npm run llm:ingest-live-deltas - - - name: Prepare and validate dataset quality - run: | - npm run llm:prepare-dataset - npm run llm:validate-dataset - - - name: Commit new external dataset deltas - run: | - if git diff --quiet -- data/external-datasets; then - echo "No external dataset delta changes to commit." - exit 0 - fi - - git config user.name "github-actions[bot]" - git config user.email "41898282+github-actions[bot]@users.noreply.github.com" - - git add data/external-datasets - git commit -m "chore(data): refresh live market training deltas" - git push origin HEAD:main - - - name: Upload ingestion state artifact - if: always() - uses: actions/upload-artifact@v4 - with: - name: live-delta-ingest-state - path: | - .artifacts/live-delta-ingest-state.json diff --git a/.github/workflows/nextjs.yml.disabled b/.github/workflows/nextjs.yml.disabled deleted file mode 100644 index ed747367..00000000 --- a/.github/workflows/nextjs.yml.disabled +++ /dev/null @@ -1,93 +0,0 @@ -# Sample workflow for building and deploying a Next.js site to GitHub Pages -# -# To get started with Next.js see: https://nextjs.org/docs/getting-started -# -name: Deploy Next.js site to Pages - -on: - # Runs on pushes targeting the default branch - push: - branches: ["main"] - - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages -permissions: - contents: read - pages: write - id-token: write - -# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. -# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. -concurrency: - group: "pages" - cancel-in-progress: false - -jobs: - # Build job - build: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Detect package manager - id: detect-package-manager - run: | - if [ -f "${{ github.workspace }}/yarn.lock" ]; then - echo "manager=yarn" >> $GITHUB_OUTPUT - echo "command=install" >> $GITHUB_OUTPUT - echo "runner=yarn" >> $GITHUB_OUTPUT - exit 0 - elif [ -f "${{ github.workspace }}/package.json" ]; then - echo "manager=npm" >> $GITHUB_OUTPUT - echo "command=ci" >> $GITHUB_OUTPUT - echo "runner=npx --no-install" >> $GITHUB_OUTPUT - exit 0 - else - echo "Unable to determine package manager" - exit 1 - fi - - name: Setup Node - uses: actions/setup-node@v4 - with: - node-version: "20" - cache: ${{ steps.detect-package-manager.outputs.manager }} - - name: Setup Pages - uses: actions/configure-pages@v5 - with: - # Automatically inject basePath in your Next.js configuration file and disable - # server side image optimization (https://nextjs.org/docs/api-reference/next/image#unoptimized). - # - # You may remove this line if you want to manage the configuration yourself. 
- static_site_generator: next - - name: Restore cache - uses: actions/cache@v4 - with: - path: | - .next/cache - # Generate a new cache whenever packages or source files change. - key: ${{ runner.os }}-nextjs-${{ hashFiles('**/package-lock.json', '**/yarn.lock') }}-${{ hashFiles('**.[jt]s', '**.[jt]sx') }} - # If source files changed but packages didn't, rebuild from a prior cache. - restore-keys: | - ${{ runner.os }}-nextjs-${{ hashFiles('**/package-lock.json', '**/yarn.lock') }}- - - name: Install dependencies - run: ${{ steps.detect-package-manager.outputs.manager }} ${{ steps.detect-package-manager.outputs.command }} - - name: Build with Next.js - run: ${{ steps.detect-package-manager.outputs.runner }} next build - - name: Upload artifact - uses: actions/upload-pages-artifact@v3 - with: - path: ./out - - # Deployment job - deploy: - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - runs-on: ubuntu-latest - needs: build - steps: - - name: Deploy to GitHub Pages - id: deployment - uses: actions/deploy-pages@v4 diff --git a/.github/workflows/qodana_code_quality.yml b/.github/workflows/qodana_code_quality.yml deleted file mode 100644 index 73481df2..00000000 --- a/.github/workflows/qodana_code_quality.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: Qodana -on: - workflow_dispatch: - pull_request: - push: - branches: # Specify your branches here - - main # The 'main' branch - - 'releases/*' # The release branches - -jobs: - qodana: - runs-on: ubuntu-latest - permissions: - contents: write - pull-requests: write - checks: write - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.pull_request.head.sha }} # to check out the actual pull request commit, not the merge commit - fetch-depth: 0 # a full history is required for pull request analysis - - name: 'Qodana Scan' - uses: JetBrains/qodana-action@v2025.3 - with: - pr-mode: false - env: - QODANA_TOKEN: ${{ secrets.QODANA_TOKEN_1287543422 }} - QODANA_ENDPOINT: 'https://qodana.cloud' \ No newline at end of file diff --git a/.github/workflows/readiness-gate.yml b/.github/workflows/readiness-gate.yml deleted file mode 100644 index c8d53110..00000000 --- a/.github/workflows/readiness-gate.yml +++ /dev/null @@ -1,46 +0,0 @@ -name: Launch Readiness Gate - -"on": - schedule: - - cron: "33 4 * * 1" - workflow_dispatch: - -jobs: - readiness-gate: - runs-on: ubuntu-latest - permissions: - contents: read - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup Node - uses: actions/setup-node@v4 - with: - node-version: "20" - cache: "npm" - - - name: Install dependencies - run: npm ci - - - name: Internal links (strict) - run: npm run check:links:strict:report - - - name: Quality checks - run: npm run pipeline:quality - - - name: Vercel deployment preflight - run: npm run check:vercel - - - name: Snow service env preflight - run: npm run snow:env:check - - - name: Upload readiness artifacts - if: always() - uses: actions/upload-artifact@v4 - with: - name: readiness-gate-artifacts - path: | - .artifacts/link-audit-report.json - .artifacts/link-audit.sarif diff --git a/.gitignore b/.gitignore index 15d20270b628c8447d0a5679961227aac319c932..8c26363d4ce030194ec849ea2b6ac8cfe98e8590 100644 GIT binary patch delta 262 zcmXAhF;2ul3`K=d8cW<^r0rHp20D(w3PD9lE$26vF{yb7d?dk+JTx{*%jE_P`_k1|=9amy)zDH{c8?d^hVl{qve_>L` KVd-_mK=2FUX;^{) delta 242 zcmey&dYhH`{qFq}JSJ8}nL2vLyZVKB2KoE>y84C0hq(F%_&A2R>ZRtDaRKFgJi}Z; 
z3gVr7LR|xbJpDpIG7}$eo;;UvMLjQ9E<++iCPN;BK0^sZE<*uBJVQAc#xo=_T(!(8MuJzVS39MQi1CA!J=HbiJ5u&CAkIh<>lq^N%<+2 d@rlLBnVEVSCAm2e$@0`BeO@lETy-K<0sw{HN8$hg diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml deleted file mode 100644 index d86ea18c..00000000 --- a/.gitlab-ci.yml +++ /dev/null @@ -1,89 +0,0 @@ -stages: - - build - - test - - deploy - -variables: - REGISTRY: ghcr.io - IMAGE_NAME: $REGISTRY/$CI_PROJECT_PATH - IMAGE_TAG: $CI_COMMIT_SHA - -build:docker: - stage: build - image: docker:latest - services: - - docker:dind - script: - - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER --password-stdin $REGISTRY - - docker build -t $IMAGE_NAME:$IMAGE_TAG -t $IMAGE_NAME:latest . - - docker push $IMAGE_NAME:$IMAGE_TAG - - docker push $IMAGE_NAME:latest - only: - - main - - merge_requests - -test:ci: - stage: test - image: node:20-alpine - script: - - npm ci - - npm run lint - - npm run build - artifacts: - paths: - - .next/ - expire_in: 1 hour - only: - - main - - merge_requests - -deploy:k8s: - stage: deploy - image: bitnami/kubectl:latest - environment: - name: production - url: https://tradehaxai.tech - script: - - kubectl set image deployment/tradehax-app tradehax=$IMAGE_NAME:$IMAGE_TAG -n default - - kubectl rollout status deployment/tradehax-app -n default - only: - - main - -deploy:helm: - stage: deploy - image: alpine/helm:latest - environment: - name: staging - url: https://staging.tradehaxai.tech - script: - - helm repo add tradehax https://charts.tradehaxai.tech || true - - helm repo update - - helm upgrade --install tradehax tradehax/tradehax - --namespace default - --values helm/values.yaml - --set image.tag=$IMAGE_TAG - --wait - only: - - main - -security:scan: - stage: test - image: aquasec/trivy:latest - script: - - trivy image --exit-code 0 $IMAGE_NAME:$IMAGE_TAG - allow_failure: true - only: - - main - -docs:deploy: - stage: deploy - script: - - mkdir -p public/docs - - cp TRADEHAX_AI_PLATFORM_SUMMARY.md public/docs/ - - cp GITLAB_AGENT_DEPLOYMENT.md public/docs/ - - cp HF_SETUP_GUIDE.md public/docs/ - artifacts: - paths: - - public - only: - - main diff --git a/.htaccess b/.htaccess deleted file mode 100644 index 3917e340..00000000 --- a/.htaccess +++ /dev/null @@ -1,24 +0,0 @@ -# Redirect bare domain to www.tradehax.net - - RewriteEngine On - RewriteCond %{HTTP_HOST} ^tradehax\.net$ [NC] - RewriteRule ^(.*)$ https://www.tradehax.net/$1 [L,R=301] - - -# Disable directory browsing - - Options -Indexes - - -# Prevent exposure of source files and common sensitive extensions - - - Require all denied - - - - - Order allow,deny - Deny from all - - diff --git a/.idea/.gitignore b/.idea/.gitignore deleted file mode 100644 index ab1f4164..00000000 --- a/.idea/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -# Default ignored files -/shelf/ -/workspace.xml -# Ignored default folder with query files -/queries/ -# Datasource local storage ignored files -/dataSources/ -/dataSources.local.xml -# Editor-based HTTP Client requests -/httpRequests/ diff --git a/.idea/IntelliLang.xml b/.idea/IntelliLang.xml deleted file mode 100644 index b223a1b2..00000000 --- a/.idea/IntelliLang.xml +++ /dev/null @@ -1,151 +0,0 @@ - - - - - AsyncQueryRunner (org.apache.commons.dbutils) - - - - - - - - - - - - - - - - - - Jodd (jodd.db) - - - - - - - - MyBatis @Select/@Delete/@Insert/@Update - - - - - - - - QueryRunner (org.apache.commons.dbutils) - - - - - - - - - - - - - - - - - - R2DBC (io.r2dbc) - - - - - - Reactiverse Postgres Client (io.reactiverse) - - - - - - - - - - - - - SmallRye Axle SqlClient 
(io.vertx.axle.sqlclient) - - - - - - SmallRye Mutiny SqlClient (io.vertx.mutiny.sqlclient) - - - - - - SmallRye Mutiny SqlConnection (io.vertx.mutiny.sqlclient) - - - - - - - - Vert.x SQL Extensions (io.vertx.ext.sql) - - - - - - - Vert.x SQL Reactive Extensions (io.vertx.reactivex.ext.sql) - - - - - - - - - - Vert.x SqlClient (io.vertx.sqlclient) - - - - - - - - - - - Vert.x SqlClient RxJava2 (io.vertx.reactivex.sqlclient) - - - - - - - - - - - - jOOQ (org.jooq.DSLContext) - - - - - - - - rxjava2-jdbc (org.davidmoten.rx.jdbc) - - - - - - - \ No newline at end of file diff --git a/.idea/go.imports.xml b/.idea/go.imports.xml deleted file mode 100644 index d7202f03..00000000 --- a/.idea/go.imports.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml deleted file mode 100644 index 4b151abf..00000000 --- a/.idea/misc.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml deleted file mode 100644 index dbc52b6f..00000000 --- a/.idea/modules.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/.idea/tradehax.iml b/.idea/tradehax.iml deleted file mode 100644 index d6ebd480..00000000 --- a/.idea/tradehax.iml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml deleted file mode 100644 index 35eb1ddf..00000000 --- a/.idea/vcs.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/.junie/skills/supabase-postgres-best-practices/AGENTS.md b/.junie/skills/supabase-postgres-best-practices/AGENTS.md deleted file mode 100644 index a7baf445..00000000 --- a/.junie/skills/supabase-postgres-best-practices/AGENTS.md +++ /dev/null @@ -1,68 +0,0 @@ -# Supabase Postgres Best Practices - -## Structure - -``` -supabase-postgres-best-practices/ - SKILL.md # Main skill file - read this first - AGENTS.md # This navigation guide - CLAUDE.md # Symlink to AGENTS.md - references/ # Detailed reference files -``` - -## Usage - -1. Read `SKILL.md` for the main skill instructions -2. Browse `references/` for detailed documentation on specific topics -3. Reference files are loaded on-demand - read only what you need - -Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. 
- -## When to Apply - -Reference these guidelines when: -- Writing SQL queries or designing schemas -- Implementing indexes or query optimization -- Reviewing database performance issues -- Configuring connection pooling or scaling -- Optimizing for Postgres-specific features -- Working with Row-Level Security (RLS) - -## Rule Categories by Priority - -| Priority | Category | Impact | Prefix | -|----------|----------|--------|--------| -| 1 | Query Performance | CRITICAL | `query-` | -| 2 | Connection Management | CRITICAL | `conn-` | -| 3 | Security & RLS | CRITICAL | `security-` | -| 4 | Schema Design | HIGH | `schema-` | -| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | -| 6 | Data Access Patterns | MEDIUM | `data-` | -| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | -| 8 | Advanced Features | LOW | `advanced-` | - -## How to Use - -Read individual rule files for detailed explanations and SQL examples: - -``` -references/query-missing-indexes.md -references/schema-partial-indexes.md -references/_sections.md -``` - -Each rule file contains: -- Brief explanation of why it matters -- Incorrect SQL example with explanation -- Correct SQL example with explanation -- Optional EXPLAIN output or metrics -- Additional context and references -- Supabase-specific notes (when applicable) - -## References - -- https://www.postgresql.org/docs/current/ -- https://supabase.com/docs -- https://wiki.postgresql.org/wiki/Performance_Optimization -- https://supabase.com/docs/guides/database/overview -- https://supabase.com/docs/guides/auth/row-level-security diff --git a/.junie/skills/supabase-postgres-best-practices/CLAUDE.md b/.junie/skills/supabase-postgres-best-practices/CLAUDE.md deleted file mode 100644 index 47dc3e3d..00000000 --- a/.junie/skills/supabase-postgres-best-practices/CLAUDE.md +++ /dev/null @@ -1 +0,0 @@ -AGENTS.md \ No newline at end of file diff --git a/.junie/skills/supabase-postgres-best-practices/README.md b/.junie/skills/supabase-postgres-best-practices/README.md deleted file mode 100644 index f1a374e1..00000000 --- a/.junie/skills/supabase-postgres-best-practices/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# Supabase Postgres Best Practices - Contributor Guide - -This skill contains Postgres performance optimization references optimized for -AI agents and LLMs. It follows the [Agent Skills Open Standard](https://agentskills.io/). - -## Quick Start - -```bash -# From repository root -npm install - -# Validate existing references -npm run validate - -# Build AGENTS.md -npm run build -``` - -## Creating a New Reference - -1. **Choose a section prefix** based on the category: - - `query-` Query Performance (CRITICAL) - - `conn-` Connection Management (CRITICAL) - - `security-` Security & RLS (CRITICAL) - - `schema-` Schema Design (HIGH) - - `lock-` Concurrency & Locking (MEDIUM-HIGH) - - `data-` Data Access Patterns (MEDIUM) - - `monitor-` Monitoring & Diagnostics (LOW-MEDIUM) - - `advanced-` Advanced Features (LOW) - -2. **Copy the template**: - ```bash - cp references/_template.md references/query-your-reference-name.md - ``` - -3. **Fill in the content** following the template structure - -4. **Validate and build**: - ```bash - npm run validate - npm run build - ``` - -5. 
**Review** the generated `AGENTS.md` - -## Skill Structure - -``` -skills/supabase-postgres-best-practices/ -β”œβ”€β”€ SKILL.md # Agent-facing skill manifest (Agent Skills spec) -β”œβ”€β”€ AGENTS.md # [GENERATED] Compiled references document -β”œβ”€β”€ README.md # This file -└── references/ - β”œβ”€β”€ _template.md # Reference template - β”œβ”€β”€ _sections.md # Section definitions - β”œβ”€β”€ _contributing.md # Writing guidelines - └── *.md # Individual references - -packages/skills-build/ -β”œβ”€β”€ src/ # Generic build system source -└── package.json # NPM scripts -``` - -## Reference File Structure - -See `references/_template.md` for the complete template. Key elements: - -````markdown ---- -title: Clear, Action-Oriented Title -impact: CRITICAL|HIGH|MEDIUM-HIGH|MEDIUM|LOW-MEDIUM|LOW -impactDescription: Quantified benefit (e.g., "10-100x faster") -tags: relevant, keywords ---- - -## [Title] - -[1-2 sentence explanation] - -**Incorrect (description):** - -```sql --- Comment explaining what's wrong -[Bad SQL example] -``` -```` - -**Correct (description):** - -```sql --- Comment explaining why this is better -[Good SQL example] -``` - -``` -## Writing Guidelines - -See `references/_contributing.md` for detailed guidelines. Key principles: - -1. **Show concrete transformations** - "Change X to Y", not abstract advice -2. **Error-first structure** - Show the problem before the solution -3. **Quantify impact** - Include specific metrics (10x faster, 50% smaller) -4. **Self-contained examples** - Complete, runnable SQL -5. **Semantic naming** - Use meaningful names (users, email), not (table1, col1) - -## Impact Levels - -| Level | Improvement | Examples | -|-------|-------------|----------| -| CRITICAL | 10-100x | Missing indexes, connection exhaustion | -| HIGH | 5-20x | Wrong index types, poor partitioning | -| MEDIUM-HIGH | 2-5x | N+1 queries, RLS optimization | -| MEDIUM | 1.5-3x | Redundant indexes, stale statistics | -| LOW-MEDIUM | 1.2-2x | VACUUM tuning, config tweaks | -| LOW | Incremental | Advanced patterns, edge cases | -``` diff --git a/.junie/skills/supabase-postgres-best-practices/SKILL.md b/.junie/skills/supabase-postgres-best-practices/SKILL.md deleted file mode 100644 index f80be156..00000000 --- a/.junie/skills/supabase-postgres-best-practices/SKILL.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -name: supabase-postgres-best-practices -description: Postgres performance optimization and best practices from Supabase. Use this skill when writing, reviewing, or optimizing Postgres queries, schema designs, or database configurations. -license: MIT -metadata: - author: supabase - version: "1.1.0" - organization: Supabase - date: January 2026 - abstract: Comprehensive Postgres performance optimization guide for developers using Supabase and Postgres. Contains performance rules across 8 categories, prioritized by impact from critical (query performance, connection management) to incremental (advanced features). Each rule includes detailed explanations, incorrect vs. correct SQL examples, query plan analysis, and specific performance metrics to guide automated optimization and code generation. ---- - -# Supabase Postgres Best Practices - -Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. 
- -## When to Apply - -Reference these guidelines when: -- Writing SQL queries or designing schemas -- Implementing indexes or query optimization -- Reviewing database performance issues -- Configuring connection pooling or scaling -- Optimizing for Postgres-specific features -- Working with Row-Level Security (RLS) - -## Rule Categories by Priority - -| Priority | Category | Impact | Prefix | -|----------|----------|--------|--------| -| 1 | Query Performance | CRITICAL | `query-` | -| 2 | Connection Management | CRITICAL | `conn-` | -| 3 | Security & RLS | CRITICAL | `security-` | -| 4 | Schema Design | HIGH | `schema-` | -| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | -| 6 | Data Access Patterns | MEDIUM | `data-` | -| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | -| 8 | Advanced Features | LOW | `advanced-` | - -## How to Use - -Read individual rule files for detailed explanations and SQL examples: - -``` -references/query-missing-indexes.md -references/schema-partial-indexes.md -references/_sections.md -``` - -Each rule file contains: -- Brief explanation of why it matters -- Incorrect SQL example with explanation -- Correct SQL example with explanation -- Optional EXPLAIN output or metrics -- Additional context and references -- Supabase-specific notes (when applicable) - -## References - -- https://www.postgresql.org/docs/current/ -- https://supabase.com/docs -- https://wiki.postgresql.org/wiki/Performance_Optimization -- https://supabase.com/docs/guides/database/overview -- https://supabase.com/docs/guides/auth/row-level-security diff --git a/.junie/skills/supabase-postgres-best-practices/references/_contributing.md b/.junie/skills/supabase-postgres-best-practices/references/_contributing.md deleted file mode 100644 index 10de8ecb..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/_contributing.md +++ /dev/null @@ -1,171 +0,0 @@ -# Writing Guidelines for Postgres References - -This document provides guidelines for creating effective Postgres best -practice references that work well with AI agents and LLMs. - -## Key Principles - -### 1. Concrete Transformation Patterns - -Show exact SQL rewrites. Avoid philosophical advice. - -**Good:** "Use `WHERE id = ANY(ARRAY[...])` instead of -`WHERE id IN (SELECT ...)`" **Bad:** "Design good schemas" - -### 2. Error-First Structure - -Always show the problematic pattern first, then the solution. This trains agents -to recognize anti-patterns. - -```markdown -**Incorrect (sequential queries):** [bad example] - -**Correct (batched query):** [good example] -``` - -### 3. Quantified Impact - -Include specific metrics. Helps agents prioritize fixes. - -**Good:** "10x faster queries", "50% smaller index", "Eliminates N+1" -**Bad:** "Faster", "Better", "More efficient" - -### 4. Self-Contained Examples - -Examples should be complete and runnable (or close to it). Include `CREATE TABLE` -if context is needed. - -```sql --- Include table definition when needed for clarity -CREATE TABLE users ( - id bigint PRIMARY KEY, - email text NOT NULL, - deleted_at timestamptz -); - --- Now show the index -CREATE INDEX users_active_email_idx ON users(email) WHERE deleted_at IS NULL; -``` - -### 5. Semantic Naming - -Use meaningful table/column names. Names carry intent for LLMs. 
- -**Good:** `users`, `email`, `created_at`, `is_active` -**Bad:** `table1`, `col1`, `field`, `flag` - ---- - -## Code Example Standards - -### SQL Formatting - -```sql --- Use lowercase keywords, clear formatting -CREATE INDEX CONCURRENTLY users_email_idx - ON users(email) - WHERE deleted_at IS NULL; - --- Not cramped or ALL CAPS -CREATE INDEX CONCURRENTLY USERS_EMAIL_IDX ON USERS(EMAIL) WHERE DELETED_AT IS NULL; -``` - -### Comments - -- Explain _why_, not _what_ -- Highlight performance implications -- Point out common pitfalls - -### Language Tags - -- `sql` - Standard SQL queries -- `plpgsql` - Stored procedures/functions -- `typescript` - Application code (when needed) -- `python` - Application code (when needed) - ---- - -## When to Include Application Code - -**Default: SQL Only** - -Most references should focus on pure SQL patterns. This keeps examples portable. - -**Include Application Code When:** - -- Connection pooling configuration -- Transaction management in application context -- ORM anti-patterns (N+1 in Prisma/TypeORM) -- Prepared statement usage - -**Format for Mixed Examples:** - -````markdown -**Incorrect (N+1 in application):** - -```typescript -for (const user of users) { - const posts = await db.query("SELECT * FROM posts WHERE user_id = $1", [ - user.id, - ]); -} -``` -```` - -**Correct (batch query):** - -```typescript -const posts = await db.query("SELECT * FROM posts WHERE user_id = ANY($1)", [ - userIds, -]); -``` - ---- - -## Impact Level Guidelines - -| Level | Improvement | Use When | -|-------|-------------|----------| -| **CRITICAL** | 10-100x | Missing indexes, connection exhaustion, sequential scans on large tables | -| **HIGH** | 5-20x | Wrong index types, poor partitioning, missing covering indexes | -| **MEDIUM-HIGH** | 2-5x | N+1 queries, inefficient pagination, RLS optimization | -| **MEDIUM** | 1.5-3x | Redundant indexes, query plan instability | -| **LOW-MEDIUM** | 1.2-2x | VACUUM tuning, configuration tweaks | -| **LOW** | Incremental | Advanced patterns, edge cases | - ---- - -## Reference Standards - -**Primary Sources:** - -- Official Postgres documentation -- Supabase documentation -- Postgres wiki -- Established blogs (2ndQuadrant, Crunchy Data) - -**Format:** - -```markdown -Reference: -[Postgres Indexes](https://www.postgresql.org/docs/current/indexes.html) -``` - ---- - -## Review Checklist - -Before submitting a reference: - -- [ ] Title is clear and action-oriented -- [ ] Impact level matches the performance gain -- [ ] impactDescription includes quantification -- [ ] Explanation is concise (1-2 sentences) -- [ ] Has at least 1 **Incorrect** SQL example -- [ ] Has at least 1 **Correct** SQL example -- [ ] SQL uses semantic naming -- [ ] Comments explain _why_, not _what_ -- [ ] Trade-offs mentioned if applicable -- [ ] Reference links included -- [ ] `npm run validate` passes -- [ ] `npm run build` generates correct output diff --git a/.junie/skills/supabase-postgres-best-practices/references/_sections.md b/.junie/skills/supabase-postgres-best-practices/references/_sections.md deleted file mode 100644 index 8ba57c23..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/_sections.md +++ /dev/null @@ -1,39 +0,0 @@ -# Section Definitions - -This file defines the rule categories for Postgres best practices. Rules are automatically assigned to sections based on their filename prefix. - -Take the examples below as pure demonstrative. Replace each section with the actual rule categories for Postgres best practices. 
- ---- - -## 1. Query Performance (query) -**Impact:** CRITICAL -**Description:** Slow queries, missing indexes, inefficient query plans. The most common source of Postgres performance issues. - -## 2. Connection Management (conn) -**Impact:** CRITICAL -**Description:** Connection pooling, limits, and serverless strategies. Critical for applications with high concurrency or serverless deployments. - -## 3. Security & RLS (security) -**Impact:** CRITICAL -**Description:** Row-Level Security policies, privilege management, and authentication patterns. - -## 4. Schema Design (schema) -**Impact:** HIGH -**Description:** Table design, index strategies, partitioning, and data type selection. Foundation for long-term performance. - -## 5. Concurrency & Locking (lock) -**Impact:** MEDIUM-HIGH -**Description:** Transaction management, isolation levels, deadlock prevention, and lock contention patterns. - -## 6. Data Access Patterns (data) -**Impact:** MEDIUM -**Description:** N+1 query elimination, batch operations, cursor-based pagination, and efficient data fetching. - -## 7. Monitoring & Diagnostics (monitor) -**Impact:** LOW-MEDIUM -**Description:** Using pg_stat_statements, EXPLAIN ANALYZE, metrics collection, and performance diagnostics. - -## 8. Advanced Features (advanced) -**Impact:** LOW -**Description:** Full-text search, JSONB optimization, PostGIS, extensions, and advanced Postgres features. diff --git a/.junie/skills/supabase-postgres-best-practices/references/_template.md b/.junie/skills/supabase-postgres-best-practices/references/_template.md deleted file mode 100644 index 91ace90e..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/_template.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Clear, Action-Oriented Title (e.g., "Use Partial Indexes for Filtered Queries") -impact: MEDIUM -impactDescription: 5-20x query speedup for filtered queries -tags: indexes, query-optimization, performance ---- - -## [Rule Title] - -[1-2 sentence explanation of the problem and why it matters. Focus on performance impact.] - -**Incorrect (describe the problem):** - -```sql --- Comment explaining what makes this slow/problematic -CREATE INDEX users_email_idx ON users(email); - -SELECT * FROM users WHERE email = 'user@example.com' AND deleted_at IS NULL; --- This scans deleted records unnecessarily -``` - -**Correct (describe the solution):** - -```sql --- Comment explaining why this is better -CREATE INDEX users_active_email_idx ON users(email) WHERE deleted_at IS NULL; - -SELECT * FROM users WHERE email = 'user@example.com' AND deleted_at IS NULL; --- Only indexes active users, 10x smaller index, faster queries -``` - -[Optional: Additional context, edge cases, or trade-offs] - -Reference: [Postgres Docs](https://www.postgresql.org/docs/current/) diff --git a/.junie/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md b/.junie/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md deleted file mode 100644 index 582cbeaa..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Use tsvector for Full-Text Search -impact: MEDIUM -impactDescription: 100x faster than LIKE, with ranking support -tags: full-text-search, tsvector, gin, search ---- - -## Use tsvector for Full-Text Search - -LIKE with wildcards can't use indexes. Full-text search with tsvector is orders of magnitude faster. 
- -**Incorrect (LIKE pattern matching):** - -```sql --- Cannot use index, scans all rows -select * from articles where content like '%postgresql%'; - --- Case-insensitive makes it worse -select * from articles where lower(content) like '%postgresql%'; -``` - -**Correct (full-text search with tsvector):** - -```sql --- Add tsvector column and index -alter table articles add column search_vector tsvector - generated always as (to_tsvector('english', coalesce(title,'') || ' ' || coalesce(content,''))) stored; - -create index articles_search_idx on articles using gin (search_vector); - --- Fast full-text search -select * from articles -where search_vector @@ to_tsquery('english', 'postgresql & performance'); - --- With ranking -select *, ts_rank(search_vector, query) as rank -from articles, to_tsquery('english', 'postgresql') query -where search_vector @@ query -order by rank desc; -``` - -Search multiple terms: - -```sql --- AND: both terms required -to_tsquery('postgresql & performance') - --- OR: either term -to_tsquery('postgresql | mysql') - --- Prefix matching -to_tsquery('post:*') -``` - -Reference: [Full Text Search](https://supabase.com/docs/guides/database/full-text-search) diff --git a/.junie/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md b/.junie/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md deleted file mode 100644 index e3d261ea..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Index JSONB Columns for Efficient Querying -impact: MEDIUM -impactDescription: 10-100x faster JSONB queries with proper indexing -tags: jsonb, gin, indexes, json ---- - -## Index JSONB Columns for Efficient Querying - -JSONB queries without indexes scan the entire table. Use GIN indexes for containment queries. 
- -**Incorrect (no index on JSONB):** - -```sql -create table products ( - id bigint primary key, - attributes jsonb -); - --- Full table scan for every query -select * from products where attributes @> '{"color": "red"}'; -select * from products where attributes->>'brand' = 'Nike'; -``` - -**Correct (GIN index for JSONB):** - -```sql --- GIN index for containment operators (@>, ?, ?&, ?|) -create index products_attrs_gin on products using gin (attributes); - --- Now containment queries use the index -select * from products where attributes @> '{"color": "red"}'; - --- For specific key lookups, use expression index -create index products_brand_idx on products ((attributes->>'brand')); -select * from products where attributes->>'brand' = 'Nike'; -``` - -Choose the right operator class: - -```sql --- jsonb_ops (default): supports all operators, larger index -create index idx1 on products using gin (attributes); - --- jsonb_path_ops: only @> operator, but 2-3x smaller index -create index idx2 on products using gin (attributes jsonb_path_ops); -``` - -Reference: [JSONB Indexes](https://www.postgresql.org/docs/current/datatype-json.html#JSON-INDEXING) diff --git a/.junie/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md b/.junie/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md deleted file mode 100644 index 40b9cc50..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Configure Idle Connection Timeouts -impact: HIGH -impactDescription: Reclaim 30-50% of connection slots from idle clients -tags: connections, timeout, idle, resource-management ---- - -## Configure Idle Connection Timeouts - -Idle connections waste resources. Configure timeouts to automatically reclaim them. - -**Incorrect (connections held indefinitely):** - -```sql --- No timeout configured -show idle_in_transaction_session_timeout; -- 0 (disabled) - --- Connections stay open forever, even when idle -select pid, state, state_change, query -from pg_stat_activity -where state = 'idle in transaction'; --- Shows transactions idle for hours, holding locks -``` - -**Correct (automatic cleanup of idle connections):** - -```sql --- Terminate connections idle in transaction after 30 seconds -alter system set idle_in_transaction_session_timeout = '30s'; - --- Terminate completely idle connections after 10 minutes -alter system set idle_session_timeout = '10min'; - --- Reload configuration -select pg_reload_conf(); -``` - -For pooled connections, configure at the pooler level: - -```ini -# pgbouncer.ini -server_idle_timeout = 60 -client_idle_timeout = 300 -``` - -Reference: [Connection Timeouts](https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT) diff --git a/.junie/skills/supabase-postgres-best-practices/references/conn-limits.md b/.junie/skills/supabase-postgres-best-practices/references/conn-limits.md deleted file mode 100644 index cb3e400c..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/conn-limits.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Set Appropriate Connection Limits -impact: CRITICAL -impactDescription: Prevent database crashes and memory exhaustion -tags: connections, max-connections, limits, stability ---- - -## Set Appropriate Connection Limits - -Too many connections exhaust memory and degrade performance. Set limits based on available resources. 
- -**Incorrect (unlimited or excessive connections):** - -```sql --- Default max_connections = 100, but often increased blindly -show max_connections; -- 500 (way too high for 4GB RAM) - --- Each connection uses 1-3MB RAM --- 500 connections * 2MB = 1GB just for connections! --- Out of memory errors under load -``` - -**Correct (calculate based on resources):** - -```sql --- Formula: max_connections = (RAM in MB / 5MB per connection) - reserved --- For 4GB RAM: (4096 / 5) - 10 = ~800 theoretical max --- But practically, 100-200 is better for query performance - --- Recommended settings for 4GB RAM -alter system set max_connections = 100; - --- Also set work_mem appropriately --- work_mem * max_connections should not exceed 25% of RAM -alter system set work_mem = '8MB'; -- 8MB * 100 = 800MB max -``` - -Monitor connection usage: - -```sql -select count(*), state from pg_stat_activity group by state; -``` - -Reference: [Database Connections](https://supabase.com/docs/guides/platform/performance#connection-management) diff --git a/.junie/skills/supabase-postgres-best-practices/references/conn-pooling.md b/.junie/skills/supabase-postgres-best-practices/references/conn-pooling.md deleted file mode 100644 index e2ebd581..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/conn-pooling.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Use Connection Pooling for All Applications -impact: CRITICAL -impactDescription: Handle 10-100x more concurrent users -tags: connection-pooling, pgbouncer, performance, scalability ---- - -## Use Connection Pooling for All Applications - -Postgres connections are expensive (1-3MB RAM each). Without pooling, applications exhaust connections under load. - -**Incorrect (new connection per request):** - -```sql --- Each request creates a new connection --- Application code: db.connect() per request --- Result: 500 concurrent users = 500 connections = crashed database - --- Check current connections -select count(*) from pg_stat_activity; -- 487 connections! -``` - -**Correct (connection pooling):** - -```sql --- Use a pooler like PgBouncer between app and database --- Application connects to pooler, pooler reuses a small pool to Postgres - --- Configure pool_size based on: (CPU cores * 2) + spindle_count --- Example for 4 cores: pool_size = 10 - --- Result: 500 concurrent users share 10 actual connections -select count(*) from pg_stat_activity; -- 10 connections -``` - -Pool modes: - -- **Transaction mode**: connection returned after each transaction (best for most apps) -- **Session mode**: connection held for entire session (needed for prepared statements, temp tables) - -Reference: [Connection Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pooler) diff --git a/.junie/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md b/.junie/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md deleted file mode 100644 index 555547d8..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Use Prepared Statements Correctly with Pooling -impact: HIGH -impactDescription: Avoid prepared statement conflicts in pooled environments -tags: prepared-statements, connection-pooling, transaction-mode ---- - -## Use Prepared Statements Correctly with Pooling - -Prepared statements are tied to individual database connections. In transaction-mode pooling, connections are shared, causing conflicts. 
- -**Incorrect (named prepared statements with transaction pooling):** - -```sql --- Named prepared statement -prepare get_user as select * from users where id = $1; - --- In transaction mode pooling, next request may get different connection -execute get_user(123); --- ERROR: prepared statement "get_user" does not exist -``` - -**Correct (use unnamed statements or session mode):** - -```sql --- Option 1: Use unnamed prepared statements (most ORMs do this automatically) --- The query is prepared and executed in a single protocol message - --- Option 2: Deallocate after use in transaction mode -prepare get_user as select * from users where id = $1; -execute get_user(123); -deallocate get_user; - --- Option 3: Use session mode pooling (port 5432 vs 6543) --- Connection is held for entire session, prepared statements persist -``` - -Check your driver settings: - -```sql --- Many drivers use prepared statements by default --- Node.js pg: { prepare: false } to disable --- JDBC: prepareThreshold=0 to disable -``` - -Reference: [Prepared Statements with Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pool-modes) diff --git a/.junie/skills/supabase-postgres-best-practices/references/data-batch-inserts.md b/.junie/skills/supabase-postgres-best-practices/references/data-batch-inserts.md deleted file mode 100644 index 997947cb..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/data-batch-inserts.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Batch INSERT Statements for Bulk Data -impact: MEDIUM -impactDescription: 10-50x faster bulk inserts -tags: batch, insert, bulk, performance, copy ---- - -## Batch INSERT Statements for Bulk Data - -Individual INSERT statements have high overhead. Batch multiple rows in single statements or use COPY. - -**Incorrect (individual inserts):** - -```sql --- Each insert is a separate transaction and round trip -insert into events (user_id, action) values (1, 'click'); -insert into events (user_id, action) values (1, 'view'); -insert into events (user_id, action) values (2, 'click'); --- ... 1000 more individual inserts - --- 1000 inserts = 1000 round trips = slow -``` - -**Correct (batch insert):** - -```sql --- Multiple rows in single statement -insert into events (user_id, action) values - (1, 'click'), - (1, 'view'), - (2, 'click'), - -- ... up to ~1000 rows per batch - (999, 'view'); - --- One round trip for 1000 rows -``` - -For large imports, use COPY: - -```sql --- COPY is fastest for bulk loading -copy events (user_id, action, created_at) -from '/path/to/data.csv' -with (format csv, header true); - --- Or from stdin in application -copy events (user_id, action) from stdin with (format csv); -1,click -1,view -2,click -\. -``` - -Reference: [COPY](https://www.postgresql.org/docs/current/sql-copy.html) diff --git a/.junie/skills/supabase-postgres-best-practices/references/data-n-plus-one.md b/.junie/skills/supabase-postgres-best-practices/references/data-n-plus-one.md deleted file mode 100644 index 2109186f..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/data-n-plus-one.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Eliminate N+1 Queries with Batch Loading -impact: MEDIUM-HIGH -impactDescription: 10-100x fewer database round trips -tags: n-plus-one, batch, performance, queries ---- - -## Eliminate N+1 Queries with Batch Loading - -N+1 queries execute one query per item in a loop. Batch them into a single query using arrays or JOINs. 
- -**Incorrect (N+1 queries):** - -```sql --- First query: get all users -select id from users where active = true; -- Returns 100 IDs - --- Then N queries, one per user -select * from orders where user_id = 1; -select * from orders where user_id = 2; -select * from orders where user_id = 3; --- ... 97 more queries! - --- Total: 101 round trips to database -``` - -**Correct (single batch query):** - -```sql --- Collect IDs and query once with ANY -select * from orders where user_id = any(array[1, 2, 3, ...]); - --- Or use JOIN instead of loop -select u.id, u.name, o.* -from users u -left join orders o on o.user_id = u.id -where u.active = true; - --- Total: 1 round trip -``` - -Application pattern: - -```sql --- Instead of looping in application code: --- for user in users: db.query("SELECT * FROM orders WHERE user_id = $1", user.id) - --- Pass array parameter: -select * from orders where user_id = any($1::bigint[]); --- Application passes: [1, 2, 3, 4, 5, ...] -``` - -Reference: [N+1 Query Problem](https://supabase.com/docs/guides/database/query-optimization) diff --git a/.junie/skills/supabase-postgres-best-practices/references/data-pagination.md b/.junie/skills/supabase-postgres-best-practices/references/data-pagination.md deleted file mode 100644 index 633d8393..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/data-pagination.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Use Cursor-Based Pagination Instead of OFFSET -impact: MEDIUM-HIGH -impactDescription: Consistent O(1) performance regardless of page depth -tags: pagination, cursor, keyset, offset, performance ---- - -## Use Cursor-Based Pagination Instead of OFFSET - -OFFSET-based pagination scans all skipped rows, getting slower on deeper pages. Cursor pagination is O(1). - -**Incorrect (OFFSET pagination):** - -```sql --- Page 1: scans 20 rows -select * from products order by id limit 20 offset 0; - --- Page 100: scans 2000 rows to skip 1980 -select * from products order by id limit 20 offset 1980; - --- Page 10000: scans 200,000 rows! -select * from products order by id limit 20 offset 199980; -``` - -**Correct (cursor/keyset pagination):** - -```sql --- Page 1: get first 20 -select * from products order by id limit 20; --- Application stores last_id = 20 - --- Page 2: start after last ID -select * from products where id > 20 order by id limit 20; --- Uses index, always fast regardless of page depth - --- Page 10000: same speed as page 1 -select * from products where id > 199980 order by id limit 20; -``` - -For multi-column sorting: - -```sql --- Cursor must include all sort columns -select * from products -where (created_at, id) > ('2024-01-15 10:00:00', 12345) -order by created_at, id -limit 20; -``` - -Reference: [Pagination](https://supabase.com/docs/guides/database/pagination) diff --git a/.junie/skills/supabase-postgres-best-practices/references/data-upsert.md b/.junie/skills/supabase-postgres-best-practices/references/data-upsert.md deleted file mode 100644 index bc95e230..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/data-upsert.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Use UPSERT for Insert-or-Update Operations -impact: MEDIUM -impactDescription: Atomic operation, eliminates race conditions -tags: upsert, on-conflict, insert, update ---- - -## Use UPSERT for Insert-or-Update Operations - -Using separate SELECT-then-INSERT/UPDATE creates race conditions. Use INSERT ... ON CONFLICT for atomic upserts. 
- -**Incorrect (check-then-insert race condition):** - -```sql --- Race condition: two requests check simultaneously -select * from settings where user_id = 123 and key = 'theme'; --- Both find nothing - --- Both try to insert -insert into settings (user_id, key, value) values (123, 'theme', 'dark'); --- One succeeds, one fails with duplicate key error! -``` - -**Correct (atomic UPSERT):** - -```sql --- Single atomic operation -insert into settings (user_id, key, value) -values (123, 'theme', 'dark') -on conflict (user_id, key) -do update set value = excluded.value, updated_at = now(); - --- Returns the inserted/updated row -insert into settings (user_id, key, value) -values (123, 'theme', 'dark') -on conflict (user_id, key) -do update set value = excluded.value -returning *; -``` - -Insert-or-ignore pattern: - -```sql --- Insert only if not exists (no update) -insert into page_views (page_id, user_id) -values (1, 123) -on conflict (page_id, user_id) do nothing; -``` - -Reference: [INSERT ON CONFLICT](https://www.postgresql.org/docs/current/sql-insert.html#SQL-ON-CONFLICT) diff --git a/.junie/skills/supabase-postgres-best-practices/references/lock-advisory.md b/.junie/skills/supabase-postgres-best-practices/references/lock-advisory.md deleted file mode 100644 index 572eaf0d..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/lock-advisory.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Use Advisory Locks for Application-Level Locking -impact: MEDIUM -impactDescription: Efficient coordination without row-level lock overhead -tags: advisory-locks, coordination, application-locks ---- - -## Use Advisory Locks for Application-Level Locking - -Advisory locks provide application-level coordination without requiring database rows to lock. - -**Incorrect (creating rows just for locking):** - -```sql --- Creating dummy rows to lock on -create table resource_locks ( - resource_name text primary key -); - -insert into resource_locks values ('report_generator'); - --- Lock by selecting the row -select * from resource_locks where resource_name = 'report_generator' for update; -``` - -**Correct (advisory locks):** - -```sql --- Session-level advisory lock (released on disconnect or unlock) -select pg_advisory_lock(hashtext('report_generator')); --- ... do exclusive work ... -select pg_advisory_unlock(hashtext('report_generator')); - --- Transaction-level lock (released on commit/rollback) -begin; -select pg_advisory_xact_lock(hashtext('daily_report')); --- ... do work ... 
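-- No explicit unlock is needed here: there is no pg_advisory_xact_unlock; transaction-level advisory locks release only at commit or rollback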
-commit; -- Lock automatically released -``` - -Try-lock for non-blocking operations: - -```sql --- Returns immediately with true/false instead of waiting -select pg_try_advisory_lock(hashtext('resource_name')); - --- Use in application -if (acquired) { - -- Do work - select pg_advisory_unlock(hashtext('resource_name')); -} else { - -- Skip or retry later -} -``` - -Reference: [Advisory Locks](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS) diff --git a/.junie/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md b/.junie/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md deleted file mode 100644 index 974da5ed..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Prevent Deadlocks with Consistent Lock Ordering -impact: MEDIUM-HIGH -impactDescription: Eliminate deadlock errors, improve reliability -tags: deadlocks, locking, transactions, ordering ---- - -## Prevent Deadlocks with Consistent Lock Ordering - -Deadlocks occur when transactions lock resources in different orders. Always -acquire locks in a consistent order. - -**Incorrect (inconsistent lock ordering):** - -```sql --- Transaction A -- Transaction B -begin; begin; -update accounts update accounts -set balance = balance - 100 set balance = balance - 50 -where id = 1; where id = 2; -- B locks row 2 - -update accounts update accounts -set balance = balance + 100 set balance = balance + 50 -where id = 2; -- A waits for B where id = 1; -- B waits for A - --- DEADLOCK! Both waiting for each other -``` - -**Correct (lock rows in consistent order first):** - -```sql --- Explicitly acquire locks in ID order before updating -begin; -select * from accounts where id in (1, 2) order by id for update; - --- Now perform updates in any order - locks already held -update accounts set balance = balance - 100 where id = 1; -update accounts set balance = balance + 100 where id = 2; -commit; -``` - -Alternative: use a single statement to update atomically: - -```sql --- Single statement acquires all locks atomically -begin; -update accounts -set balance = balance + case id - when 1 then -100 - when 2 then 100 -end -where id in (1, 2); -commit; -``` - -Detect deadlocks in logs: - -```sql --- Check for recent deadlocks -select * from pg_stat_database where deadlocks > 0; - --- Enable deadlock logging -set log_lock_waits = on; -set deadlock_timeout = '1s'; -``` - -Reference: -[Deadlocks](https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-DEADLOCKS) diff --git a/.junie/skills/supabase-postgres-best-practices/references/lock-short-transactions.md b/.junie/skills/supabase-postgres-best-practices/references/lock-short-transactions.md deleted file mode 100644 index e6b8ef26..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/lock-short-transactions.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Keep Transactions Short to Reduce Lock Contention -impact: MEDIUM-HIGH -impactDescription: 3-5x throughput improvement, fewer deadlocks -tags: transactions, locking, contention, performance ---- - -## Keep Transactions Short to Reduce Lock Contention - -Long-running transactions hold locks that block other queries. Keep transactions as short as possible. 
- -**Incorrect (long transaction with external calls):** - -```sql -begin; -select * from orders where id = 1 for update; -- Lock acquired - --- Application makes HTTP call to payment API (2-5 seconds) --- Other queries on this row are blocked! - -update orders set status = 'paid' where id = 1; -commit; -- Lock held for entire duration -``` - -**Correct (minimal transaction scope):** - -```sql --- Validate data and call APIs outside transaction --- Application: response = await paymentAPI.charge(...) - --- Only hold lock for the actual update -begin; -update orders -set status = 'paid', payment_id = $1 -where id = $2 and status = 'pending' -returning *; -commit; -- Lock held for milliseconds -``` - -Use `statement_timeout` to prevent runaway transactions: - -```sql --- Abort queries running longer than 30 seconds -set statement_timeout = '30s'; - --- Or per-session -set local statement_timeout = '5s'; -``` - -Reference: [Transaction Management](https://www.postgresql.org/docs/current/tutorial-transactions.html) diff --git a/.junie/skills/supabase-postgres-best-practices/references/lock-skip-locked.md b/.junie/skills/supabase-postgres-best-practices/references/lock-skip-locked.md deleted file mode 100644 index 77bdbb97..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/lock-skip-locked.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Use SKIP LOCKED for Non-Blocking Queue Processing -impact: MEDIUM-HIGH -impactDescription: 10x throughput for worker queues -tags: skip-locked, queue, workers, concurrency ---- - -## Use SKIP LOCKED for Non-Blocking Queue Processing - -When multiple workers process a queue, SKIP LOCKED allows workers to process different rows without waiting. - -**Incorrect (workers block each other):** - -```sql --- Worker 1 and Worker 2 both try to get next job -begin; -select * from jobs where status = 'pending' order by created_at limit 1 for update; --- Worker 2 waits for Worker 1's lock to release! -``` - -**Correct (SKIP LOCKED for parallel processing):** - -```sql --- Each worker skips locked rows and gets the next available -begin; -select * from jobs -where status = 'pending' -order by created_at -limit 1 -for update skip locked; - --- Worker 1 gets job 1, Worker 2 gets job 2 (no waiting) - -update jobs set status = 'processing' where id = $1; -commit; -``` - -Complete queue pattern: - -```sql --- Atomic claim-and-update in one statement -update jobs -set status = 'processing', worker_id = $1, started_at = now() -where id = ( - select id from jobs - where status = 'pending' - order by created_at - limit 1 - for update skip locked -) -returning *; -``` - -Reference: [SELECT FOR UPDATE SKIP LOCKED](https://www.postgresql.org/docs/current/sql-select.html#SQL-FOR-UPDATE-SHARE) diff --git a/.junie/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md b/.junie/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md deleted file mode 100644 index 542978c3..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Use EXPLAIN ANALYZE to Diagnose Slow Queries -impact: LOW-MEDIUM -impactDescription: Identify exact bottlenecks in query execution -tags: explain, analyze, diagnostics, query-plan ---- - -## Use EXPLAIN ANALYZE to Diagnose Slow Queries - -EXPLAIN ANALYZE executes the query and shows actual timings, revealing the true performance bottlenecks. 
- -**Incorrect (guessing at performance issues):** - -```sql --- Query is slow, but why? -select * from orders where customer_id = 123 and status = 'pending'; --- "It must be missing an index" - but which one? -``` - -**Correct (use EXPLAIN ANALYZE):** - -```sql -explain (analyze, buffers, format text) -select * from orders where customer_id = 123 and status = 'pending'; - --- Output reveals the issue: --- Seq Scan on orders (cost=0.00..25000.00 rows=50 width=100) (actual time=0.015..450.123 rows=50 loops=1) --- Filter: ((customer_id = 123) AND (status = 'pending'::text)) --- Rows Removed by Filter: 999950 --- Buffers: shared hit=5000 read=15000 --- Planning Time: 0.150 ms --- Execution Time: 450.500 ms -``` - -Key things to look for: - -```sql --- Seq Scan on large tables = missing index --- Rows Removed by Filter = poor selectivity or missing index --- Buffers: read >> hit = data not cached, needs more memory --- Nested Loop with high loops = consider different join strategy --- Sort Method: external merge = work_mem too low -``` - -Reference: [EXPLAIN](https://supabase.com/docs/guides/database/inspect) diff --git a/.junie/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md b/.junie/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md deleted file mode 100644 index d7e82f1a..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Enable pg_stat_statements for Query Analysis -impact: LOW-MEDIUM -impactDescription: Identify top resource-consuming queries -tags: pg-stat-statements, monitoring, statistics, performance ---- - -## Enable pg_stat_statements for Query Analysis - -pg_stat_statements tracks execution statistics for all queries, helping identify slow and frequent queries. - -**Incorrect (no visibility into query patterns):** - -```sql --- Database is slow, but which queries are the problem? 
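-- pg_stat_activity only shows statements running right now; it keeps no history or aggregated timings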
--- No way to know without pg_stat_statements -``` - -**Correct (enable and query pg_stat_statements):** - -```sql --- Enable the extension -create extension if not exists pg_stat_statements; - --- Find slowest queries by total time -select - calls, - round(total_exec_time::numeric, 2) as total_time_ms, - round(mean_exec_time::numeric, 2) as mean_time_ms, - query -from pg_stat_statements -order by total_exec_time desc -limit 10; - --- Find most frequent queries -select calls, query -from pg_stat_statements -order by calls desc -limit 10; - --- Reset statistics after optimization -select pg_stat_statements_reset(); -``` - -Key metrics to monitor: - -```sql --- Queries with high mean time (candidates for optimization) -select query, mean_exec_time, calls -from pg_stat_statements -where mean_exec_time > 100 -- > 100ms average -order by mean_exec_time desc; -``` - -Reference: [pg_stat_statements](https://supabase.com/docs/guides/database/extensions/pg_stat_statements) diff --git a/.junie/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md b/.junie/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md deleted file mode 100644 index e0e8ea0b..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Maintain Table Statistics with VACUUM and ANALYZE -impact: MEDIUM -impactDescription: 2-10x better query plans with accurate statistics -tags: vacuum, analyze, statistics, maintenance, autovacuum ---- - -## Maintain Table Statistics with VACUUM and ANALYZE - -Outdated statistics cause the query planner to make poor decisions. VACUUM reclaims space, ANALYZE updates statistics. - -**Incorrect (stale statistics):** - -```sql --- Table has 1M rows but stats say 1000 --- Query planner chooses wrong strategy -explain select * from orders where status = 'pending'; --- Shows: Seq Scan (because stats show small table) --- Actually: Index Scan would be much faster -``` - -**Correct (maintain fresh statistics):** - -```sql --- Manually analyze after large data changes -analyze orders; - --- Analyze specific columns used in WHERE clauses -analyze orders (status, created_at); - --- Check when tables were last analyzed -select - relname, - last_vacuum, - last_autovacuum, - last_analyze, - last_autoanalyze -from pg_stat_user_tables -order by last_analyze nulls first; -``` - -Autovacuum tuning for busy tables: - -```sql --- Increase frequency for high-churn tables -alter table orders set ( - autovacuum_vacuum_scale_factor = 0.05, -- Vacuum at 5% dead tuples (default 20%) - autovacuum_analyze_scale_factor = 0.02 -- Analyze at 2% changes (default 10%) -); - --- Check autovacuum status -select * from pg_stat_progress_vacuum; -``` - -Reference: [VACUUM](https://supabase.com/docs/guides/database/database-size#vacuum-operations) diff --git a/.junie/skills/supabase-postgres-best-practices/references/query-composite-indexes.md b/.junie/skills/supabase-postgres-best-practices/references/query-composite-indexes.md deleted file mode 100644 index fea64523..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/query-composite-indexes.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Create Composite Indexes for Multi-Column Queries -impact: HIGH -impactDescription: 5-10x faster multi-column queries -tags: indexes, composite-index, multi-column, query-optimization ---- - -## Create Composite Indexes for Multi-Column Queries - -When queries filter on multiple columns, a composite 
index is more efficient than separate single-column indexes. - -**Incorrect (separate indexes require bitmap scan):** - -```sql --- Two separate indexes -create index orders_status_idx on orders (status); -create index orders_created_idx on orders (created_at); - --- Query must combine both indexes (slower) -select * from orders where status = 'pending' and created_at > '2024-01-01'; -``` - -**Correct (composite index):** - -```sql --- Single composite index (leftmost column first for equality checks) -create index orders_status_created_idx on orders (status, created_at); - --- Query uses one efficient index scan -select * from orders where status = 'pending' and created_at > '2024-01-01'; -``` - -**Column order matters** - place equality columns first, range columns last: - -```sql --- Good: status (=) before created_at (>) -create index idx on orders (status, created_at); - --- Works for: WHERE status = 'pending' --- Works for: WHERE status = 'pending' AND created_at > '2024-01-01' --- Does NOT work for: WHERE created_at > '2024-01-01' (leftmost prefix rule) -``` - -Reference: [Multicolumn Indexes](https://www.postgresql.org/docs/current/indexes-multicolumn.html) diff --git a/.junie/skills/supabase-postgres-best-practices/references/query-covering-indexes.md b/.junie/skills/supabase-postgres-best-practices/references/query-covering-indexes.md deleted file mode 100644 index 9d2a4947..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/query-covering-indexes.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Use Covering Indexes to Avoid Table Lookups -impact: MEDIUM-HIGH -impactDescription: 2-5x faster queries by eliminating heap fetches -tags: indexes, covering-index, include, index-only-scan ---- - -## Use Covering Indexes to Avoid Table Lookups - -Covering indexes include all columns needed by a query, enabling index-only scans that skip the table entirely. - -**Incorrect (index scan + heap fetch):** - -```sql -create index users_email_idx on users (email); - --- Must fetch name and created_at from table heap -select email, name, created_at from users where email = 'user@example.com'; -``` - -**Correct (index-only scan with INCLUDE):** - -```sql --- Include non-searchable columns in the index -create index users_email_idx on users (email) include (name, created_at); - --- All columns served from index, no table access needed -select email, name, created_at from users where email = 'user@example.com'; -``` - -Use INCLUDE for columns you SELECT but don't filter on: - -```sql --- Searching by status, but also need customer_id and total -create index orders_status_idx on orders (status) include (customer_id, total); - -select status, customer_id, total from orders where status = 'shipped'; -``` - -Reference: [Index-Only Scans](https://www.postgresql.org/docs/current/indexes-index-only-scans.html) diff --git a/.junie/skills/supabase-postgres-best-practices/references/query-index-types.md b/.junie/skills/supabase-postgres-best-practices/references/query-index-types.md deleted file mode 100644 index 93b32590..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/query-index-types.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Choose the Right Index Type for Your Data -impact: HIGH -impactDescription: 10-100x improvement with correct index type -tags: indexes, btree, gin, gist, brin, hash, index-types ---- - -## Choose the Right Index Type for Your Data - -Different index types excel at different query patterns. The default B-tree isn't always optimal. 
- -**Incorrect (B-tree for JSONB containment):** - -```sql --- B-tree cannot optimize containment operators -create index products_attrs_idx on products (attributes); -select * from products where attributes @> '{"color": "red"}'; --- Full table scan - B-tree doesn't support @> operator -``` - -**Correct (GIN for JSONB):** - -```sql --- GIN supports @>, ?, ?&, ?| operators -create index products_attrs_idx on products using gin (attributes); -select * from products where attributes @> '{"color": "red"}'; -``` - -Index type guide: - -```sql --- B-tree (default): =, <, >, BETWEEN, IN, IS NULL -create index users_created_idx on users (created_at); - --- GIN: arrays, JSONB, full-text search -create index posts_tags_idx on posts using gin (tags); - --- GiST: geometric data, range types, nearest-neighbor (KNN) queries -create index locations_idx on places using gist (location); - --- BRIN: large time-series tables (10-100x smaller) -create index events_time_idx on events using brin (created_at); - --- Hash: equality-only (slightly faster than B-tree for =) -create index sessions_token_idx on sessions using hash (token); -``` - -Reference: [Index Types](https://www.postgresql.org/docs/current/indexes-types.html) diff --git a/.junie/skills/supabase-postgres-best-practices/references/query-missing-indexes.md b/.junie/skills/supabase-postgres-best-practices/references/query-missing-indexes.md deleted file mode 100644 index e6daace7..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/query-missing-indexes.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Add Indexes on WHERE and JOIN Columns -impact: CRITICAL -impactDescription: 100-1000x faster queries on large tables -tags: indexes, performance, sequential-scan, query-optimization ---- - -## Add Indexes on WHERE and JOIN Columns - -Queries filtering or joining on unindexed columns cause full table scans, which become exponentially slower as tables grow. 
- -**Incorrect (sequential scan on large table):** - -```sql --- No index on customer_id causes full table scan -select * from orders where customer_id = 123; - --- EXPLAIN shows: Seq Scan on orders (cost=0.00..25000.00 rows=100 width=85) -``` - -**Correct (index scan):** - -```sql --- Create index on frequently filtered column -create index orders_customer_id_idx on orders (customer_id); - -select * from orders where customer_id = 123; - --- EXPLAIN shows: Index Scan using orders_customer_id_idx (cost=0.42..8.44 rows=100 width=85) -``` - -For JOIN columns, always index the foreign key side: - -```sql --- Index the referencing column -create index orders_customer_id_idx on orders (customer_id); - -select c.name, o.total -from customers c -join orders o on o.customer_id = c.id; -``` - -Reference: [Query Optimization](https://supabase.com/docs/guides/database/query-optimization) diff --git a/.junie/skills/supabase-postgres-best-practices/references/query-partial-indexes.md b/.junie/skills/supabase-postgres-best-practices/references/query-partial-indexes.md deleted file mode 100644 index 3e61a341..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/query-partial-indexes.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Use Partial Indexes for Filtered Queries -impact: HIGH -impactDescription: 5-20x smaller indexes, faster writes and queries -tags: indexes, partial-index, query-optimization, storage ---- - -## Use Partial Indexes for Filtered Queries - -Partial indexes only include rows matching a WHERE condition, making them smaller and faster when queries consistently filter on the same condition. - -**Incorrect (full index includes irrelevant rows):** - -```sql --- Index includes all rows, even soft-deleted ones -create index users_email_idx on users (email); - --- Query always filters active users -select * from users where email = 'user@example.com' and deleted_at is null; -``` - -**Correct (partial index matches query filter):** - -```sql --- Index only includes active users -create index users_active_email_idx on users (email) -where deleted_at is null; - --- Query uses the smaller, faster index -select * from users where email = 'user@example.com' and deleted_at is null; -``` - -Common use cases for partial indexes: - -```sql --- Only pending orders (status rarely changes once completed) -create index orders_pending_idx on orders (created_at) -where status = 'pending'; - --- Only non-null values -create index products_sku_idx on products (sku) -where sku is not null; -``` - -Reference: [Partial Indexes](https://www.postgresql.org/docs/current/indexes-partial.html) diff --git a/.junie/skills/supabase-postgres-best-practices/references/schema-constraints.md b/.junie/skills/supabase-postgres-best-practices/references/schema-constraints.md deleted file mode 100644 index 1d2ef8f9..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/schema-constraints.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Add Constraints Safely in Migrations -impact: HIGH -impactDescription: Prevents migration failures and enables idempotent schema changes -tags: constraints, migrations, schema, alter-table ---- - -## Add Constraints Safely in Migrations - -PostgreSQL does not support `ADD CONSTRAINT IF NOT EXISTS`. Migrations using this syntax will fail. 
- -**Incorrect (causes syntax error):** - -```sql --- ERROR: syntax error at or near "not" (SQLSTATE 42601) -alter table public.profiles -add constraint if not exists profiles_birthchart_id_unique unique (birthchart_id); -``` - -**Correct (idempotent constraint creation):** - -```sql --- Use DO block to check before adding -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'profiles_birthchart_id_unique' - and conrelid = 'public.profiles'::regclass - ) then - alter table public.profiles - add constraint profiles_birthchart_id_unique unique (birthchart_id); - end if; -end $$; -``` - -For all constraint types: - -```sql --- Check constraints -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'check_age_positive' - ) then - alter table users add constraint check_age_positive check (age > 0); - end if; -end $$; - --- Foreign keys -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'profiles_birthchart_id_fkey' - ) then - alter table profiles - add constraint profiles_birthchart_id_fkey - foreign key (birthchart_id) references birthcharts(id); - end if; -end $$; -``` - -Check if constraint exists: - -```sql --- Query to check constraint existence -select conname, contype, pg_get_constraintdef(oid) -from pg_constraint -where conrelid = 'public.profiles'::regclass; - --- contype values: --- 'p' = PRIMARY KEY --- 'f' = FOREIGN KEY --- 'u' = UNIQUE --- 'c' = CHECK -``` - -Reference: [Constraints](https://www.postgresql.org/docs/current/ddl-constraints.html) diff --git a/.junie/skills/supabase-postgres-best-practices/references/schema-data-types.md b/.junie/skills/supabase-postgres-best-practices/references/schema-data-types.md deleted file mode 100644 index f253a581..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/schema-data-types.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Choose Appropriate Data Types -impact: HIGH -impactDescription: 50% storage reduction, faster comparisons -tags: data-types, schema, storage, performance ---- - -## Choose Appropriate Data Types - -Using the right data types reduces storage, improves query performance, and prevents bugs. 
- -**Incorrect (wrong data types):** - -```sql -create table users ( - id int, -- Will overflow at 2.1 billion - email varchar(255), -- Unnecessary length limit - created_at timestamp, -- Missing timezone info - is_active varchar(5), -- String for boolean - price varchar(20) -- String for numeric -); -``` - -**Correct (appropriate data types):** - -```sql -create table users ( - id bigint generated always as identity primary key, -- 9 quintillion max - email text, -- No artificial limit, same performance as varchar - created_at timestamptz, -- Always store timezone-aware timestamps - is_active boolean default true, -- 1 byte vs variable string length - price numeric(10,2) -- Exact decimal arithmetic -); -``` - -Key guidelines: - -```sql --- IDs: use bigint, not int (future-proofing) --- Strings: use text, not varchar(n) unless constraint needed --- Time: use timestamptz, not timestamp --- Money: use numeric, not float (precision matters) --- Enums: use text with check constraint or create enum type -``` - -Reference: [Data Types](https://www.postgresql.org/docs/current/datatype.html) diff --git a/.junie/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md b/.junie/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md deleted file mode 100644 index 6c3d6ff6..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Index Foreign Key Columns -impact: HIGH -impactDescription: 10-100x faster JOINs and CASCADE operations -tags: foreign-key, indexes, joins, schema ---- - -## Index Foreign Key Columns - -Postgres does not automatically index foreign key columns. Missing indexes cause slow JOINs and CASCADE operations. - -**Incorrect (unindexed foreign key):** - -```sql -create table orders ( - id bigint generated always as identity primary key, - customer_id bigint references customers(id) on delete cascade, - total numeric(10,2) -); - --- No index on customer_id! 
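-- The foreign key itself creates no index; only customers.id is indexed (via its primary key constraint)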
--- JOINs and ON DELETE CASCADE both require full table scan -select * from orders where customer_id = 123; -- Seq Scan -delete from customers where id = 123; -- Locks table, scans all orders -``` - -**Correct (indexed foreign key):** - -```sql -create table orders ( - id bigint generated always as identity primary key, - customer_id bigint references customers(id) on delete cascade, - total numeric(10,2) -); - --- Always index the FK column -create index orders_customer_id_idx on orders (customer_id); - --- Now JOINs and cascades are fast -select * from orders where customer_id = 123; -- Index Scan -delete from customers where id = 123; -- Uses index, fast cascade -``` - -Find missing FK indexes: - -```sql -select - conrelid::regclass as table_name, - a.attname as fk_column -from pg_constraint c -join pg_attribute a on a.attrelid = c.conrelid and a.attnum = any(c.conkey) -where c.contype = 'f' - and not exists ( - select 1 from pg_index i - where i.indrelid = c.conrelid and a.attnum = any(i.indkey) - ); -``` - -Reference: [Foreign Keys](https://www.postgresql.org/docs/current/ddl-constraints.html#DDL-CONSTRAINTS-FK) diff --git a/.junie/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md b/.junie/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md deleted file mode 100644 index f0072940..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Use Lowercase Identifiers for Compatibility -impact: MEDIUM -impactDescription: Avoid case-sensitivity bugs with tools, ORMs, and AI assistants -tags: naming, identifiers, case-sensitivity, schema, conventions ---- - -## Use Lowercase Identifiers for Compatibility - -PostgreSQL folds unquoted identifiers to lowercase. Quoted mixed-case identifiers require quotes forever and cause issues with tools, ORMs, and AI assistants that may not recognize them. 
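To gauge how much of an existing schema is affected, a catalog query along these lines (the `public` schema is assumed) can list identifiers that are not already lowercase:

```sql
-- Find tables, views, and columns whose names would require quoting
select c.relname as table_name, a.attname as column_name
from pg_class c
join pg_namespace n on n.oid = c.relnamespace
join pg_attribute a on a.attrelid = c.oid and a.attnum > 0 and not a.attisdropped
where n.nspname = 'public'
  and c.relkind in ('r', 'v', 'm')
  and (c.relname <> lower(c.relname) or a.attname <> lower(a.attname));
```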
- -**Incorrect (mixed-case identifiers):** - -```sql --- Quoted identifiers preserve case but require quotes everywhere -CREATE TABLE "Users" ( - "userId" bigint PRIMARY KEY, - "firstName" text, - "lastName" text -); - --- Must always quote or queries fail -SELECT "firstName" FROM "Users" WHERE "userId" = 1; - --- This fails - Users becomes users without quotes -SELECT firstName FROM Users; --- ERROR: relation "users" does not exist -``` - -**Correct (lowercase snake_case):** - -```sql --- Unquoted lowercase identifiers are portable and tool-friendly -CREATE TABLE users ( - user_id bigint PRIMARY KEY, - first_name text, - last_name text -); - --- Works without quotes, recognized by all tools -SELECT first_name FROM users WHERE user_id = 1; -``` - -Common sources of mixed-case identifiers: - -```sql --- ORMs often generate quoted camelCase - configure them to use snake_case --- Migrations from other databases may preserve original casing --- Some GUI tools quote identifiers by default - disable this - --- If stuck with mixed-case, create views as a compatibility layer -CREATE VIEW users AS SELECT "userId" AS user_id, "firstName" AS first_name FROM "Users"; -``` - -Reference: [Identifiers and Key Words](https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS) diff --git a/.junie/skills/supabase-postgres-best-practices/references/schema-partitioning.md b/.junie/skills/supabase-postgres-best-practices/references/schema-partitioning.md deleted file mode 100644 index 13137a03..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/schema-partitioning.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Partition Large Tables for Better Performance -impact: MEDIUM-HIGH -impactDescription: 5-20x faster queries and maintenance on large tables -tags: partitioning, large-tables, time-series, performance ---- - -## Partition Large Tables for Better Performance - -Partitioning splits a large table into smaller pieces, improving query performance and maintenance operations. 
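Partition pruning is what actually delivers the win, so it is worth confirming in the plan. A quick check, assuming the partitioned `events` table from the correct example below:

```sql
-- Only the partitions covering the predicate should appear in the plan
explain (costs off)
select * from events where created_at >= '2024-01-15';

-- Pruning is on by default; confirm it has not been disabled
show enable_partition_pruning;
```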
- -**Incorrect (single large table):** - -```sql -create table events ( - id bigint generated always as identity, - created_at timestamptz, - data jsonb -); - --- 500M rows, queries scan everything -select * from events where created_at > '2024-01-01'; -- Slow -vacuum events; -- Takes hours, locks table -``` - -**Correct (partitioned by time range):** - -```sql -create table events ( - id bigint generated always as identity, - created_at timestamptz not null, - data jsonb -) partition by range (created_at); - --- Create partitions for each month -create table events_2024_01 partition of events - for values from ('2024-01-01') to ('2024-02-01'); - -create table events_2024_02 partition of events - for values from ('2024-02-01') to ('2024-03-01'); - --- Queries only scan relevant partitions -select * from events where created_at > '2024-01-15'; -- Only scans events_2024_01+ - --- Drop old data instantly -drop table events_2023_01; -- Instant vs DELETE taking hours -``` - -When to partition: - -- Tables > 100M rows -- Time-series data with date-based queries -- Need to efficiently drop old data - -Reference: [Table Partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) diff --git a/.junie/skills/supabase-postgres-best-practices/references/schema-primary-keys.md b/.junie/skills/supabase-postgres-best-practices/references/schema-primary-keys.md deleted file mode 100644 index fb0fbb16..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/schema-primary-keys.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Select Optimal Primary Key Strategy -impact: HIGH -impactDescription: Better index locality, reduced fragmentation -tags: primary-key, identity, uuid, serial, schema ---- - -## Select Optimal Primary Key Strategy - -Primary key choice affects insert performance, index size, and replication -efficiency. 
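One way to see the fragmentation effect on an existing table is the planner's physical-ordering statistic; a minimal sketch, assuming the primary key column is named `id`:

```sql
-- correlation near 1.0  => sequentially inserted keys (identity, UUIDv7)
-- correlation near 0    => scattered inserts (random UUIDv4)
select schemaname, tablename, attname, correlation
from pg_stats
where schemaname = 'public' and attname = 'id';
```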
- -**Incorrect (problematic PK choices):** - -```sql --- identity is the SQL-standard approach -create table users ( - id serial primary key -- Works, but IDENTITY is recommended -); - --- Random UUIDs (v4) cause index fragmentation -create table orders ( - id uuid default gen_random_uuid() primary key -- UUIDv4 = random = scattered inserts -); -``` - -**Correct (optimal PK strategies):** - -```sql --- Use IDENTITY for sequential IDs (SQL-standard, best for most cases) -create table users ( - id bigint generated always as identity primary key -); - --- For distributed systems needing UUIDs, use UUIDv7 (time-ordered) --- Requires pg_uuidv7 extension: create extension pg_uuidv7; -create table orders ( - id uuid default uuid_generate_v7() primary key -- Time-ordered, no fragmentation -); - --- Alternative: time-prefixed IDs for sortable, distributed IDs (no extension needed) -create table events ( - id text default concat( - to_char(now() at time zone 'utc', 'YYYYMMDDHH24MISSMS'), - gen_random_uuid()::text - ) primary key -); -``` - -Guidelines: - -- Single database: `bigint identity` (sequential, 8 bytes, SQL-standard) -- Distributed/exposed IDs: UUIDv7 (requires pg_uuidv7) or ULID (time-ordered, no - fragmentation) -- `serial` works but `identity` is SQL-standard and preferred for new - applications -- Avoid random UUIDs (v4) as primary keys on large tables (causes index - fragmentation) - -Reference: -[Identity Columns](https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-PARMS-GENERATED-IDENTITY) diff --git a/.junie/skills/supabase-postgres-best-practices/references/security-privileges.md b/.junie/skills/supabase-postgres-best-practices/references/security-privileges.md deleted file mode 100644 index 448ec345..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/security-privileges.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Apply Principle of Least Privilege -impact: MEDIUM -impactDescription: Reduced attack surface, better audit trail -tags: privileges, security, roles, permissions ---- - -## Apply Principle of Least Privilege - -Grant only the minimum permissions required. Never use superuser for application queries. 
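Before tightening grants, it can help to audit what a role can already do; a sketch using `information_schema` (the role name is illustrative):

```sql
-- List every table-level privilege currently granted to the role
select table_schema, table_name, privilege_type
from information_schema.role_table_grants
where grantee = 'app_user'
order by table_schema, table_name, privilege_type;
```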
- -**Incorrect (overly broad permissions):** - -```sql --- Application uses superuser connection --- Or grants ALL to application role -grant all privileges on all tables in schema public to app_user; -grant all privileges on all sequences in schema public to app_user; - --- Any SQL injection becomes catastrophic --- drop table users; cascades to everything -``` - -**Correct (minimal, specific grants):** - -```sql --- Create role with no default privileges -create role app_readonly nologin; - --- Grant only SELECT on specific tables -grant usage on schema public to app_readonly; -grant select on public.products, public.categories to app_readonly; - --- Create role for writes with limited scope -create role app_writer nologin; -grant usage on schema public to app_writer; -grant select, insert, update on public.orders to app_writer; -grant usage on sequence orders_id_seq to app_writer; --- No DELETE permission - --- Login role inherits from these -create role app_user login password 'xxx'; -grant app_writer to app_user; -``` - -Revoke public defaults: - -```sql --- Revoke default public access -revoke all on schema public from public; -revoke all on all tables in schema public from public; -``` - -Reference: [Roles and Privileges](https://supabase.com/blog/postgres-roles-and-privileges) diff --git a/.junie/skills/supabase-postgres-best-practices/references/security-rls-basics.md b/.junie/skills/supabase-postgres-best-practices/references/security-rls-basics.md deleted file mode 100644 index c61e1a85..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/security-rls-basics.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Enable Row Level Security for Multi-Tenant Data -impact: CRITICAL -impactDescription: Database-enforced tenant isolation, prevent data leaks -tags: rls, row-level-security, multi-tenant, security ---- - -## Enable Row Level Security for Multi-Tenant Data - -Row Level Security (RLS) enforces data access at the database level, ensuring users only see their own data. - -**Incorrect (application-level filtering only):** - -```sql --- Relying only on application to filter -select * from orders where user_id = $current_user_id; - --- Bug or bypass means all data is exposed! 
-select * from orders; -- Returns ALL orders -``` - -**Correct (database-enforced RLS):** - -```sql --- Enable RLS on the table -alter table orders enable row level security; - --- Create policy for users to see only their orders -create policy orders_user_policy on orders - for all - using (user_id = current_setting('app.current_user_id')::bigint); - --- Force RLS even for table owners -alter table orders force row level security; - --- Set user context and query -set app.current_user_id = '123'; -select * from orders; -- Only returns orders for user 123 -``` - -Policy for authenticated role: - -```sql -create policy orders_user_policy on orders - for all - to authenticated - using (user_id = auth.uid()); -``` - -Reference: [Row Level Security](https://supabase.com/docs/guides/database/postgres/row-level-security) diff --git a/.junie/skills/supabase-postgres-best-practices/references/security-rls-performance.md b/.junie/skills/supabase-postgres-best-practices/references/security-rls-performance.md deleted file mode 100644 index b32d92f7..00000000 --- a/.junie/skills/supabase-postgres-best-practices/references/security-rls-performance.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Optimize RLS Policies for Performance -impact: HIGH -impactDescription: 5-10x faster RLS queries with proper patterns -tags: rls, performance, security, optimization ---- - -## Optimize RLS Policies for Performance - -Poorly written RLS policies can cause severe performance issues. Use subqueries and indexes strategically. - -**Incorrect (function called for every row):** - -```sql -create policy orders_policy on orders - using (auth.uid() = user_id); -- auth.uid() called per row! - --- With 1M rows, auth.uid() is called 1M times -``` - -**Correct (wrap functions in SELECT):** - -```sql -create policy orders_policy on orders - using ((select auth.uid()) = user_id); -- Called once, cached - --- 100x+ faster on large tables -``` - -Use security definer functions for complex checks: - -```sql --- Create helper function (runs as definer, bypasses RLS) -create or replace function is_team_member(team_id bigint) -returns boolean -language sql -security definer -set search_path = '' -as $$ - select exists ( - select 1 from public.team_members - where team_id = $1 and user_id = (select auth.uid()) - ); -$$; - --- Use in policy (indexed lookup, not per-row check) -create policy team_orders_policy on orders - using ((select is_team_member(team_id))); -``` - -Always add indexes on columns used in RLS policies: - -```sql -create index orders_user_id_idx on orders (user_id); -``` - -Reference: [RLS Performance](https://supabase.com/docs/guides/database/postgres/row-level-security#rls-performance-recommendations) diff --git a/.lighthouserc.json b/.lighthouserc.json deleted file mode 100644 index ce4a64df..00000000 --- a/.lighthouserc.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "ci": { - "assert": { - "assertions": { - "categories:performance": ["warn", { "minScore": 0.7 }], - "categories:accessibility": ["warn", { "minScore": 0.8 }], - "categories:best-practices": ["warn", { "minScore": 0.8 }], - "categories:seo": ["warn", { "minScore": 0.9 }], - "first-contentful-paint": ["warn", { "maxNumericValue": 3000 }], - "largest-contentful-paint": ["warn", { "maxNumericValue": 4000 }], - "total-blocking-time": ["warn", { "maxNumericValue": 300 }], - "cumulative-layout-shift": ["warn", { "maxNumericValue": 0.1 }], - "speed-index": ["warn", { "maxNumericValue": 4000 }] - } - }, - "upload": { - "target": "temporary-public-storage" - } - } -} 
diff --git a/.vscode-extension/package.json b/.vscode-extension/package.json deleted file mode 100644 index e744ccc8..00000000 --- a/.vscode-extension/package.json +++ /dev/null @@ -1,93 +0,0 @@ -{ - "name": "tradehax-bot", - "displayName": "TradeHax Bot Manager", - "description": "Manage and monitor TradeHax automated trading bots from VSCode", - "version": "1.0.0", - "publisher": "DarkModder33", - "engines": { - "vscode": "^1.85.0" - }, - "categories": [ - "Other", - "Productivity" - ], - "keywords": [ - "trading", - "solana", - "bot", - "automation", - "crypto" - ], - "activationEvents": [ - "onCommand:tradehax.openDashboard", - "onCommand:tradehax.createBot", - "onCommand:tradehax.viewStats" - ], - "main": "./dist/extension.js", - "contributes": { - "commands": [ - { - "command": "tradehax.openDashboard", - "title": "TradeHax: Open Bot Dashboard" - }, - { - "command": "tradehax.createBot", - "title": "TradeHax: Create New Bot" - }, - { - "command": "tradehax.viewStats", - "title": "TradeHax: View Bot Statistics" - }, - { - "command": "tradehax.startBot", - "title": "TradeHax: Start Bot" - }, - { - "command": "tradehax.stopBot", - "title": "TradeHax: Stop Bot" - } - ], - "viewsContainers": { - "activitybar": [ - { - "id": "tradehax-explorer", - "title": "TradeHax", - "icon": "media/tradehax-icon.svg" - } - ] - }, - "views": { - "tradehax-explorer": [ - { - "id": "tradehax-bots", - "name": "Active Bots" - }, - { - "id": "tradehax-portfolio", - "name": "Portfolio" - }, - { - "id": "tradehax-signals", - "name": "Trading Signals" - } - ] - } - }, - "scripts": { - "vscode:prepublish": "npm run esbuild-base -- --minify", - "esbuild-base": "esbuild ./src/extension.ts --bundle --outfile=dist/extension.js --external:vscode --platform=node", - "esbuild": "npm run esbuild-base -- --sourcemap", - "esbuild-watch": "npm run esbuild-base -- --sourcemap --watch", - "test": "node ./out/test/runTest.js" - }, - "devDependencies": { - "@types/vscode": "^1.85.0", - "@types/node": "^20.0.0", - "esbuild": "^0.27.3", - "typescript": "^5.0.0" - }, - "dependencies": { - "axios": "^1.6.0", - "ws": "^8.14.0" - } -} diff --git a/.vscode-extension/src/extension.ts b/.vscode-extension/src/extension.ts deleted file mode 100644 index d8a368c8..00000000 --- a/.vscode-extension/src/extension.ts +++ /dev/null @@ -1,218 +0,0 @@ -import * as vscode from "vscode"; -import axios from "axios"; - -export function activate(context: vscode.ExtensionContext) { - console.log("TradeHax extension is now active"); - - // Command: Open Dashboard - const openDashboard = vscode.commands.registerCommand( - "tradehax.openDashboard", - async () => { - const panel = vscode.window.createWebviewPanel( - "tradehaxDashboard", - "TradeHax Bot Dashboard", - vscode.ViewColumn.One, - { enableScripts: true } - ); - - const apiUrl = vscode.workspace - .getConfiguration("tradehax") - .get("apiUrl", "http://localhost:3000"); - - panel.webview.html = getWebviewContent(apiUrl); - } - ); - - // Command: Create Bot - const createBot = vscode.commands.registerCommand( - "tradehax.createBot", - async () => { - const name = await vscode.window.showInputBox({ - prompt: "Enter bot name", - placeholder: "My Trading Bot", - }); - - if (!name) return; - - const strategy = await vscode.window.showQuickPick( - ["scalping", "swing", "long-term", "arbitrage"], - { placeHolder: "Select strategy" } - ); - - if (!strategy) return; - - try { - const apiUrl = vscode.workspace - .getConfiguration("tradehax") - .get("apiUrl", "http://localhost:3000"); - - const response = 
await axios.post(`${apiUrl}/api/trading/bot/create`, { - name, - strategy, - riskLevel: "medium", - allocatedCapital: 5, - }); - - vscode.window.showInformationMessage( - `βœ… Bot "${name}" created successfully!` - ); - } catch (error) { - vscode.window.showErrorMessage( - `Failed to create bot: ${error instanceof Error ? error.message : "Unknown error"}` - ); - } - } - ); - - // Command: View Stats - const viewStats = vscode.commands.registerCommand( - "tradehax.viewStats", - async () => { - const botId = await vscode.window.showInputBox({ - prompt: "Enter bot ID", - placeholder: "bot-123456", - }); - - if (!botId) return; - - try { - const apiUrl = vscode.workspace - .getConfiguration("tradehax") - .get("apiUrl", "http://localhost:3000"); - - const response = await axios.get( - `${apiUrl}/api/trading/bot/${botId}/stats` - ); - - const stats = response.data.stats; - vscode.window.showInformationMessage( - `πŸ“Š Bot Stats:\n\n` + - `Trades: ${stats.totalTrades}\n` + - `Win Rate: ${stats.winRate}%\n` + - `Profit: ${stats.netProfit} SOL` - ); - } catch (error) { - vscode.window.showErrorMessage( - `Failed to fetch stats: ${error instanceof Error ? error.message : "Unknown error"}` - ); - } - } - ); - - context.subscriptions.push(openDashboard, createBot, viewStats); -} - -function getWebviewContent(apiUrl: string): string { - return ` - - - - - - TradeHax Dashboard - - - -
-  <body>
-    <h1>🤖 TradeHax Bot Dashboard</h1>
-    <p>API: ${apiUrl}</p>
-    <div id="bots">Loading bots...</div>
-  </body>
-</html>
- - - - - `; -} - -export function deactivate() { - console.log("TradeHax extension deactivated"); -} diff --git a/.windsurf/skills/supabase-postgres-best-practices/AGENTS.md b/.windsurf/skills/supabase-postgres-best-practices/AGENTS.md deleted file mode 100644 index a7baf445..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/AGENTS.md +++ /dev/null @@ -1,68 +0,0 @@ -# Supabase Postgres Best Practices - -## Structure - -``` -supabase-postgres-best-practices/ - SKILL.md # Main skill file - read this first - AGENTS.md # This navigation guide - CLAUDE.md # Symlink to AGENTS.md - references/ # Detailed reference files -``` - -## Usage - -1. Read `SKILL.md` for the main skill instructions -2. Browse `references/` for detailed documentation on specific topics -3. Reference files are loaded on-demand - read only what you need - -Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. - -## When to Apply - -Reference these guidelines when: -- Writing SQL queries or designing schemas -- Implementing indexes or query optimization -- Reviewing database performance issues -- Configuring connection pooling or scaling -- Optimizing for Postgres-specific features -- Working with Row-Level Security (RLS) - -## Rule Categories by Priority - -| Priority | Category | Impact | Prefix | -|----------|----------|--------|--------| -| 1 | Query Performance | CRITICAL | `query-` | -| 2 | Connection Management | CRITICAL | `conn-` | -| 3 | Security & RLS | CRITICAL | `security-` | -| 4 | Schema Design | HIGH | `schema-` | -| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | -| 6 | Data Access Patterns | MEDIUM | `data-` | -| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | -| 8 | Advanced Features | LOW | `advanced-` | - -## How to Use - -Read individual rule files for detailed explanations and SQL examples: - -``` -references/query-missing-indexes.md -references/schema-partial-indexes.md -references/_sections.md -``` - -Each rule file contains: -- Brief explanation of why it matters -- Incorrect SQL example with explanation -- Correct SQL example with explanation -- Optional EXPLAIN output or metrics -- Additional context and references -- Supabase-specific notes (when applicable) - -## References - -- https://www.postgresql.org/docs/current/ -- https://supabase.com/docs -- https://wiki.postgresql.org/wiki/Performance_Optimization -- https://supabase.com/docs/guides/database/overview -- https://supabase.com/docs/guides/auth/row-level-security diff --git a/.windsurf/skills/supabase-postgres-best-practices/CLAUDE.md b/.windsurf/skills/supabase-postgres-best-practices/CLAUDE.md deleted file mode 100644 index 47dc3e3d..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/CLAUDE.md +++ /dev/null @@ -1 +0,0 @@ -AGENTS.md \ No newline at end of file diff --git a/.windsurf/skills/supabase-postgres-best-practices/README.md b/.windsurf/skills/supabase-postgres-best-practices/README.md deleted file mode 100644 index f1a374e1..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# Supabase Postgres Best Practices - Contributor Guide - -This skill contains Postgres performance optimization references optimized for -AI agents and LLMs. It follows the [Agent Skills Open Standard](https://agentskills.io/). 
- -## Quick Start - -```bash -# From repository root -npm install - -# Validate existing references -npm run validate - -# Build AGENTS.md -npm run build -``` - -## Creating a New Reference - -1. **Choose a section prefix** based on the category: - - `query-` Query Performance (CRITICAL) - - `conn-` Connection Management (CRITICAL) - - `security-` Security & RLS (CRITICAL) - - `schema-` Schema Design (HIGH) - - `lock-` Concurrency & Locking (MEDIUM-HIGH) - - `data-` Data Access Patterns (MEDIUM) - - `monitor-` Monitoring & Diagnostics (LOW-MEDIUM) - - `advanced-` Advanced Features (LOW) - -2. **Copy the template**: - ```bash - cp references/_template.md references/query-your-reference-name.md - ``` - -3. **Fill in the content** following the template structure - -4. **Validate and build**: - ```bash - npm run validate - npm run build - ``` - -5. **Review** the generated `AGENTS.md` - -## Skill Structure - -``` -skills/supabase-postgres-best-practices/ -β”œβ”€β”€ SKILL.md # Agent-facing skill manifest (Agent Skills spec) -β”œβ”€β”€ AGENTS.md # [GENERATED] Compiled references document -β”œβ”€β”€ README.md # This file -└── references/ - β”œβ”€β”€ _template.md # Reference template - β”œβ”€β”€ _sections.md # Section definitions - β”œβ”€β”€ _contributing.md # Writing guidelines - └── *.md # Individual references - -packages/skills-build/ -β”œβ”€β”€ src/ # Generic build system source -└── package.json # NPM scripts -``` - -## Reference File Structure - -See `references/_template.md` for the complete template. Key elements: - -````markdown ---- -title: Clear, Action-Oriented Title -impact: CRITICAL|HIGH|MEDIUM-HIGH|MEDIUM|LOW-MEDIUM|LOW -impactDescription: Quantified benefit (e.g., "10-100x faster") -tags: relevant, keywords ---- - -## [Title] - -[1-2 sentence explanation] - -**Incorrect (description):** - -```sql --- Comment explaining what's wrong -[Bad SQL example] -``` -```` - -**Correct (description):** - -```sql --- Comment explaining why this is better -[Good SQL example] -``` - -``` -## Writing Guidelines - -See `references/_contributing.md` for detailed guidelines. Key principles: - -1. **Show concrete transformations** - "Change X to Y", not abstract advice -2. **Error-first structure** - Show the problem before the solution -3. **Quantify impact** - Include specific metrics (10x faster, 50% smaller) -4. **Self-contained examples** - Complete, runnable SQL -5. **Semantic naming** - Use meaningful names (users, email), not (table1, col1) - -## Impact Levels - -| Level | Improvement | Examples | -|-------|-------------|----------| -| CRITICAL | 10-100x | Missing indexes, connection exhaustion | -| HIGH | 5-20x | Wrong index types, poor partitioning | -| MEDIUM-HIGH | 2-5x | N+1 queries, RLS optimization | -| MEDIUM | 1.5-3x | Redundant indexes, stale statistics | -| LOW-MEDIUM | 1.2-2x | VACUUM tuning, config tweaks | -| LOW | Incremental | Advanced patterns, edge cases | -``` diff --git a/.windsurf/skills/supabase-postgres-best-practices/SKILL.md b/.windsurf/skills/supabase-postgres-best-practices/SKILL.md deleted file mode 100644 index f80be156..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/SKILL.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -name: supabase-postgres-best-practices -description: Postgres performance optimization and best practices from Supabase. Use this skill when writing, reviewing, or optimizing Postgres queries, schema designs, or database configurations. 
-license: MIT -metadata: - author: supabase - version: "1.1.0" - organization: Supabase - date: January 2026 - abstract: Comprehensive Postgres performance optimization guide for developers using Supabase and Postgres. Contains performance rules across 8 categories, prioritized by impact from critical (query performance, connection management) to incremental (advanced features). Each rule includes detailed explanations, incorrect vs. correct SQL examples, query plan analysis, and specific performance metrics to guide automated optimization and code generation. ---- - -# Supabase Postgres Best Practices - -Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. - -## When to Apply - -Reference these guidelines when: -- Writing SQL queries or designing schemas -- Implementing indexes or query optimization -- Reviewing database performance issues -- Configuring connection pooling or scaling -- Optimizing for Postgres-specific features -- Working with Row-Level Security (RLS) - -## Rule Categories by Priority - -| Priority | Category | Impact | Prefix | -|----------|----------|--------|--------| -| 1 | Query Performance | CRITICAL | `query-` | -| 2 | Connection Management | CRITICAL | `conn-` | -| 3 | Security & RLS | CRITICAL | `security-` | -| 4 | Schema Design | HIGH | `schema-` | -| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | -| 6 | Data Access Patterns | MEDIUM | `data-` | -| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | -| 8 | Advanced Features | LOW | `advanced-` | - -## How to Use - -Read individual rule files for detailed explanations and SQL examples: - -``` -references/query-missing-indexes.md -references/schema-partial-indexes.md -references/_sections.md -``` - -Each rule file contains: -- Brief explanation of why it matters -- Incorrect SQL example with explanation -- Correct SQL example with explanation -- Optional EXPLAIN output or metrics -- Additional context and references -- Supabase-specific notes (when applicable) - -## References - -- https://www.postgresql.org/docs/current/ -- https://supabase.com/docs -- https://wiki.postgresql.org/wiki/Performance_Optimization -- https://supabase.com/docs/guides/database/overview -- https://supabase.com/docs/guides/auth/row-level-security diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/_contributing.md b/.windsurf/skills/supabase-postgres-best-practices/references/_contributing.md deleted file mode 100644 index 10de8ecb..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/_contributing.md +++ /dev/null @@ -1,171 +0,0 @@ -# Writing Guidelines for Postgres References - -This document provides guidelines for creating effective Postgres best -practice references that work well with AI agents and LLMs. - -## Key Principles - -### 1. Concrete Transformation Patterns - -Show exact SQL rewrites. Avoid philosophical advice. - -**Good:** "Use `WHERE id = ANY(ARRAY[...])` instead of -`WHERE id IN (SELECT ...)`" **Bad:** "Design good schemas" - -### 2. Error-First Structure - -Always show the problematic pattern first, then the solution. This trains agents -to recognize anti-patterns. - -```markdown -**Incorrect (sequential queries):** [bad example] - -**Correct (batched query):** [good example] -``` - -### 3. Quantified Impact - -Include specific metrics. Helps agents prioritize fixes. 
- -**Good:** "10x faster queries", "50% smaller index", "Eliminates N+1" -**Bad:** "Faster", "Better", "More efficient" - -### 4. Self-Contained Examples - -Examples should be complete and runnable (or close to it). Include `CREATE TABLE` -if context is needed. - -```sql --- Include table definition when needed for clarity -CREATE TABLE users ( - id bigint PRIMARY KEY, - email text NOT NULL, - deleted_at timestamptz -); - --- Now show the index -CREATE INDEX users_active_email_idx ON users(email) WHERE deleted_at IS NULL; -``` - -### 5. Semantic Naming - -Use meaningful table/column names. Names carry intent for LLMs. - -**Good:** `users`, `email`, `created_at`, `is_active` -**Bad:** `table1`, `col1`, `field`, `flag` - ---- - -## Code Example Standards - -### SQL Formatting - -```sql --- Use lowercase keywords, clear formatting -CREATE INDEX CONCURRENTLY users_email_idx - ON users(email) - WHERE deleted_at IS NULL; - --- Not cramped or ALL CAPS -CREATE INDEX CONCURRENTLY USERS_EMAIL_IDX ON USERS(EMAIL) WHERE DELETED_AT IS NULL; -``` - -### Comments - -- Explain _why_, not _what_ -- Highlight performance implications -- Point out common pitfalls - -### Language Tags - -- `sql` - Standard SQL queries -- `plpgsql` - Stored procedures/functions -- `typescript` - Application code (when needed) -- `python` - Application code (when needed) - ---- - -## When to Include Application Code - -**Default: SQL Only** - -Most references should focus on pure SQL patterns. This keeps examples portable. - -**Include Application Code When:** - -- Connection pooling configuration -- Transaction management in application context -- ORM anti-patterns (N+1 in Prisma/TypeORM) -- Prepared statement usage - -**Format for Mixed Examples:** - -````markdown -**Incorrect (N+1 in application):** - -```typescript -for (const user of users) { - const posts = await db.query("SELECT * FROM posts WHERE user_id = $1", [ - user.id, - ]); -} -``` -```` - -**Correct (batch query):** - -```typescript -const posts = await db.query("SELECT * FROM posts WHERE user_id = ANY($1)", [ - userIds, -]); -``` - ---- - -## Impact Level Guidelines - -| Level | Improvement | Use When | -|-------|-------------|----------| -| **CRITICAL** | 10-100x | Missing indexes, connection exhaustion, sequential scans on large tables | -| **HIGH** | 5-20x | Wrong index types, poor partitioning, missing covering indexes | -| **MEDIUM-HIGH** | 2-5x | N+1 queries, inefficient pagination, RLS optimization | -| **MEDIUM** | 1.5-3x | Redundant indexes, query plan instability | -| **LOW-MEDIUM** | 1.2-2x | VACUUM tuning, configuration tweaks | -| **LOW** | Incremental | Advanced patterns, edge cases | - ---- - -## Reference Standards - -**Primary Sources:** - -- Official Postgres documentation -- Supabase documentation -- Postgres wiki -- Established blogs (2ndQuadrant, Crunchy Data) - -**Format:** - -```markdown -Reference: -[Postgres Indexes](https://www.postgresql.org/docs/current/indexes.html) -``` - ---- - -## Review Checklist - -Before submitting a reference: - -- [ ] Title is clear and action-oriented -- [ ] Impact level matches the performance gain -- [ ] impactDescription includes quantification -- [ ] Explanation is concise (1-2 sentences) -- [ ] Has at least 1 **Incorrect** SQL example -- [ ] Has at least 1 **Correct** SQL example -- [ ] SQL uses semantic naming -- [ ] Comments explain _why_, not _what_ -- [ ] Trade-offs mentioned if applicable -- [ ] Reference links included -- [ ] `npm run validate` passes -- [ ] `npm run build` generates correct 
output diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/_sections.md b/.windsurf/skills/supabase-postgres-best-practices/references/_sections.md deleted file mode 100644 index 8ba57c23..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/_sections.md +++ /dev/null @@ -1,39 +0,0 @@ -# Section Definitions - -This file defines the rule categories for Postgres best practices. Rules are automatically assigned to sections based on their filename prefix. - -Take the examples below as pure demonstrative. Replace each section with the actual rule categories for Postgres best practices. - ---- - -## 1. Query Performance (query) -**Impact:** CRITICAL -**Description:** Slow queries, missing indexes, inefficient query plans. The most common source of Postgres performance issues. - -## 2. Connection Management (conn) -**Impact:** CRITICAL -**Description:** Connection pooling, limits, and serverless strategies. Critical for applications with high concurrency or serverless deployments. - -## 3. Security & RLS (security) -**Impact:** CRITICAL -**Description:** Row-Level Security policies, privilege management, and authentication patterns. - -## 4. Schema Design (schema) -**Impact:** HIGH -**Description:** Table design, index strategies, partitioning, and data type selection. Foundation for long-term performance. - -## 5. Concurrency & Locking (lock) -**Impact:** MEDIUM-HIGH -**Description:** Transaction management, isolation levels, deadlock prevention, and lock contention patterns. - -## 6. Data Access Patterns (data) -**Impact:** MEDIUM -**Description:** N+1 query elimination, batch operations, cursor-based pagination, and efficient data fetching. - -## 7. Monitoring & Diagnostics (monitor) -**Impact:** LOW-MEDIUM -**Description:** Using pg_stat_statements, EXPLAIN ANALYZE, metrics collection, and performance diagnostics. - -## 8. Advanced Features (advanced) -**Impact:** LOW -**Description:** Full-text search, JSONB optimization, PostGIS, extensions, and advanced Postgres features. diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/_template.md b/.windsurf/skills/supabase-postgres-best-practices/references/_template.md deleted file mode 100644 index 91ace90e..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/_template.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Clear, Action-Oriented Title (e.g., "Use Partial Indexes for Filtered Queries") -impact: MEDIUM -impactDescription: 5-20x query speedup for filtered queries -tags: indexes, query-optimization, performance ---- - -## [Rule Title] - -[1-2 sentence explanation of the problem and why it matters. Focus on performance impact.] 
- -**Incorrect (describe the problem):** - -```sql --- Comment explaining what makes this slow/problematic -CREATE INDEX users_email_idx ON users(email); - -SELECT * FROM users WHERE email = 'user@example.com' AND deleted_at IS NULL; --- This scans deleted records unnecessarily -``` - -**Correct (describe the solution):** - -```sql --- Comment explaining why this is better -CREATE INDEX users_active_email_idx ON users(email) WHERE deleted_at IS NULL; - -SELECT * FROM users WHERE email = 'user@example.com' AND deleted_at IS NULL; --- Only indexes active users, 10x smaller index, faster queries -``` - -[Optional: Additional context, edge cases, or trade-offs] - -Reference: [Postgres Docs](https://www.postgresql.org/docs/current/) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md b/.windsurf/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md deleted file mode 100644 index 582cbeaa..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Use tsvector for Full-Text Search -impact: MEDIUM -impactDescription: 100x faster than LIKE, with ranking support -tags: full-text-search, tsvector, gin, search ---- - -## Use tsvector for Full-Text Search - -LIKE with wildcards can't use indexes. Full-text search with tsvector is orders of magnitude faster. - -**Incorrect (LIKE pattern matching):** - -```sql --- Cannot use index, scans all rows -select * from articles where content like '%postgresql%'; - --- Case-insensitive makes it worse -select * from articles where lower(content) like '%postgresql%'; -``` - -**Correct (full-text search with tsvector):** - -```sql --- Add tsvector column and index -alter table articles add column search_vector tsvector - generated always as (to_tsvector('english', coalesce(title,'') || ' ' || coalesce(content,''))) stored; - -create index articles_search_idx on articles using gin (search_vector); - --- Fast full-text search -select * from articles -where search_vector @@ to_tsquery('english', 'postgresql & performance'); - --- With ranking -select *, ts_rank(search_vector, query) as rank -from articles, to_tsquery('english', 'postgresql') query -where search_vector @@ query -order by rank desc; -``` - -Search multiple terms: - -```sql --- AND: both terms required -to_tsquery('postgresql & performance') - --- OR: either term -to_tsquery('postgresql | mysql') - --- Prefix matching -to_tsquery('post:*') -``` - -Reference: [Full Text Search](https://supabase.com/docs/guides/database/full-text-search) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md b/.windsurf/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md deleted file mode 100644 index e3d261ea..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Index JSONB Columns for Efficient Querying -impact: MEDIUM -impactDescription: 10-100x faster JSONB queries with proper indexing -tags: jsonb, gin, indexes, json ---- - -## Index JSONB Columns for Efficient Querying - -JSONB queries without indexes scan the entire table. Use GIN indexes for containment queries. 
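Whether the GIN index is actually used is easy to confirm with EXPLAIN; a quick check, assuming the `products` table and `products_attrs_gin` index from the example below (very small tables may still favor a sequential scan):

```sql
-- Expect a Bitmap Index Scan on products_attrs_gin rather than a Seq Scan
explain (costs off)
select * from products where attributes @> '{"color": "red"}';
```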
- -**Incorrect (no index on JSONB):** - -```sql -create table products ( - id bigint primary key, - attributes jsonb -); - --- Full table scan for every query -select * from products where attributes @> '{"color": "red"}'; -select * from products where attributes->>'brand' = 'Nike'; -``` - -**Correct (GIN index for JSONB):** - -```sql --- GIN index for containment operators (@>, ?, ?&, ?|) -create index products_attrs_gin on products using gin (attributes); - --- Now containment queries use the index -select * from products where attributes @> '{"color": "red"}'; - --- For specific key lookups, use expression index -create index products_brand_idx on products ((attributes->>'brand')); -select * from products where attributes->>'brand' = 'Nike'; -``` - -Choose the right operator class: - -```sql --- jsonb_ops (default): supports all operators, larger index -create index idx1 on products using gin (attributes); - --- jsonb_path_ops: only @> operator, but 2-3x smaller index -create index idx2 on products using gin (attributes jsonb_path_ops); -``` - -Reference: [JSONB Indexes](https://www.postgresql.org/docs/current/datatype-json.html#JSON-INDEXING) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md b/.windsurf/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md deleted file mode 100644 index 40b9cc50..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Configure Idle Connection Timeouts -impact: HIGH -impactDescription: Reclaim 30-50% of connection slots from idle clients -tags: connections, timeout, idle, resource-management ---- - -## Configure Idle Connection Timeouts - -Idle connections waste resources. Configure timeouts to automatically reclaim them. - -**Incorrect (connections held indefinitely):** - -```sql --- No timeout configured -show idle_in_transaction_session_timeout; -- 0 (disabled) - --- Connections stay open forever, even when idle -select pid, state, state_change, query -from pg_stat_activity -where state = 'idle in transaction'; --- Shows transactions idle for hours, holding locks -``` - -**Correct (automatic cleanup of idle connections):** - -```sql --- Terminate connections idle in transaction after 30 seconds -alter system set idle_in_transaction_session_timeout = '30s'; - --- Terminate completely idle connections after 10 minutes -alter system set idle_session_timeout = '10min'; - --- Reload configuration -select pg_reload_conf(); -``` - -For pooled connections, configure at the pooler level: - -```ini -# pgbouncer.ini -server_idle_timeout = 60 -client_idle_timeout = 300 -``` - -Reference: [Connection Timeouts](https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/conn-limits.md b/.windsurf/skills/supabase-postgres-best-practices/references/conn-limits.md deleted file mode 100644 index cb3e400c..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/conn-limits.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Set Appropriate Connection Limits -impact: CRITICAL -impactDescription: Prevent database crashes and memory exhaustion -tags: connections, max-connections, limits, stability ---- - -## Set Appropriate Connection Limits - -Too many connections exhaust memory and degrade performance. Set limits based on available resources. 
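To see how close a database is running to its configured limit, a monitoring query along these lines can be run at any time:

```sql
-- Current connection usage versus max_connections
select
  (select count(*) from pg_stat_activity)  as current_connections,
  current_setting('max_connections')::int  as max_connections,
  round(100.0 * (select count(*) from pg_stat_activity)
        / current_setting('max_connections')::int, 1) as pct_used;
```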
- -**Incorrect (unlimited or excessive connections):** - -```sql --- Default max_connections = 100, but often increased blindly -show max_connections; -- 500 (way too high for 4GB RAM) - --- Each connection uses 1-3MB RAM --- 500 connections * 2MB = 1GB just for connections! --- Out of memory errors under load -``` - -**Correct (calculate based on resources):** - -```sql --- Formula: max_connections = (RAM in MB / 5MB per connection) - reserved --- For 4GB RAM: (4096 / 5) - 10 = ~800 theoretical max --- But practically, 100-200 is better for query performance - --- Recommended settings for 4GB RAM -alter system set max_connections = 100; - --- Also set work_mem appropriately --- work_mem * max_connections should not exceed 25% of RAM -alter system set work_mem = '8MB'; -- 8MB * 100 = 800MB max -``` - -Monitor connection usage: - -```sql -select count(*), state from pg_stat_activity group by state; -``` - -Reference: [Database Connections](https://supabase.com/docs/guides/platform/performance#connection-management) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/conn-pooling.md b/.windsurf/skills/supabase-postgres-best-practices/references/conn-pooling.md deleted file mode 100644 index e2ebd581..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/conn-pooling.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Use Connection Pooling for All Applications -impact: CRITICAL -impactDescription: Handle 10-100x more concurrent users -tags: connection-pooling, pgbouncer, performance, scalability ---- - -## Use Connection Pooling for All Applications - -Postgres connections are expensive (1-3MB RAM each). Without pooling, applications exhaust connections under load. - -**Incorrect (new connection per request):** - -```sql --- Each request creates a new connection --- Application code: db.connect() per request --- Result: 500 concurrent users = 500 connections = crashed database - --- Check current connections -select count(*) from pg_stat_activity; -- 487 connections! -``` - -**Correct (connection pooling):** - -```sql --- Use a pooler like PgBouncer between app and database --- Application connects to pooler, pooler reuses a small pool to Postgres - --- Configure pool_size based on: (CPU cores * 2) + spindle_count --- Example for 4 cores: pool_size = 10 - --- Result: 500 concurrent users share 10 actual connections -select count(*) from pg_stat_activity; -- 10 connections -``` - -Pool modes: - -- **Transaction mode**: connection returned after each transaction (best for most apps) -- **Session mode**: connection held for entire session (needed for prepared statements, temp tables) - -Reference: [Connection Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pooler) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md b/.windsurf/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md deleted file mode 100644 index 555547d8..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Use Prepared Statements Correctly with Pooling -impact: HIGH -impactDescription: Avoid prepared statement conflicts in pooled environments -tags: prepared-statements, connection-pooling, transaction-mode ---- - -## Use Prepared Statements Correctly with Pooling - -Prepared statements are tied to individual database connections. 
In transaction-mode pooling, connections are shared, causing conflicts. - -**Incorrect (named prepared statements with transaction pooling):** - -```sql --- Named prepared statement -prepare get_user as select * from users where id = $1; - --- In transaction mode pooling, next request may get different connection -execute get_user(123); --- ERROR: prepared statement "get_user" does not exist -``` - -**Correct (use unnamed statements or session mode):** - -```sql --- Option 1: Use unnamed prepared statements (most ORMs do this automatically) --- The query is prepared and executed in a single protocol message - --- Option 2: Deallocate after use in transaction mode -prepare get_user as select * from users where id = $1; -execute get_user(123); -deallocate get_user; - --- Option 3: Use session mode pooling (port 5432 vs 6543) --- Connection is held for entire session, prepared statements persist -``` - -Check your driver settings: - -```sql --- Many drivers use prepared statements by default --- Node.js pg: { prepare: false } to disable --- JDBC: prepareThreshold=0 to disable -``` - -Reference: [Prepared Statements with Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pool-modes) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/data-batch-inserts.md b/.windsurf/skills/supabase-postgres-best-practices/references/data-batch-inserts.md deleted file mode 100644 index 997947cb..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/data-batch-inserts.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Batch INSERT Statements for Bulk Data -impact: MEDIUM -impactDescription: 10-50x faster bulk inserts -tags: batch, insert, bulk, performance, copy ---- - -## Batch INSERT Statements for Bulk Data - -Individual INSERT statements have high overhead. Batch multiple rows in single statements or use COPY. - -**Incorrect (individual inserts):** - -```sql --- Each insert is a separate transaction and round trip -insert into events (user_id, action) values (1, 'click'); -insert into events (user_id, action) values (1, 'view'); -insert into events (user_id, action) values (2, 'click'); --- ... 1000 more individual inserts - --- 1000 inserts = 1000 round trips = slow -``` - -**Correct (batch insert):** - -```sql --- Multiple rows in single statement -insert into events (user_id, action) values - (1, 'click'), - (1, 'view'), - (2, 'click'), - -- ... up to ~1000 rows per batch - (999, 'view'); - --- One round trip for 1000 rows -``` - -For large imports, use COPY: - -```sql --- COPY is fastest for bulk loading -copy events (user_id, action, created_at) -from '/path/to/data.csv' -with (format csv, header true); - --- Or from stdin in application -copy events (user_id, action) from stdin with (format csv); -1,click -1,view -2,click -\. -``` - -Reference: [COPY](https://www.postgresql.org/docs/current/sql-copy.html) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/data-n-plus-one.md b/.windsurf/skills/supabase-postgres-best-practices/references/data-n-plus-one.md deleted file mode 100644 index 2109186f..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/data-n-plus-one.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Eliminate N+1 Queries with Batch Loading -impact: MEDIUM-HIGH -impactDescription: 10-100x fewer database round trips -tags: n-plus-one, batch, performance, queries ---- - -## Eliminate N+1 Queries with Batch Loading - -N+1 queries execute one query per item in a loop. 
Batch them into a single query using arrays or JOINs. - -**Incorrect (N+1 queries):** - -```sql --- First query: get all users -select id from users where active = true; -- Returns 100 IDs - --- Then N queries, one per user -select * from orders where user_id = 1; -select * from orders where user_id = 2; -select * from orders where user_id = 3; --- ... 97 more queries! - --- Total: 101 round trips to database -``` - -**Correct (single batch query):** - -```sql --- Collect IDs and query once with ANY -select * from orders where user_id = any(array[1, 2, 3, ...]); - --- Or use JOIN instead of loop -select u.id, u.name, o.* -from users u -left join orders o on o.user_id = u.id -where u.active = true; - --- Total: 1 round trip -``` - -Application pattern: - -```sql --- Instead of looping in application code: --- for user in users: db.query("SELECT * FROM orders WHERE user_id = $1", user.id) - --- Pass array parameter: -select * from orders where user_id = any($1::bigint[]); --- Application passes: [1, 2, 3, 4, 5, ...] -``` - -Reference: [N+1 Query Problem](https://supabase.com/docs/guides/database/query-optimization) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/data-pagination.md b/.windsurf/skills/supabase-postgres-best-practices/references/data-pagination.md deleted file mode 100644 index 633d8393..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/data-pagination.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Use Cursor-Based Pagination Instead of OFFSET -impact: MEDIUM-HIGH -impactDescription: Consistent O(1) performance regardless of page depth -tags: pagination, cursor, keyset, offset, performance ---- - -## Use Cursor-Based Pagination Instead of OFFSET - -OFFSET-based pagination scans all skipped rows, getting slower on deeper pages. Cursor pagination is O(1). - -**Incorrect (OFFSET pagination):** - -```sql --- Page 1: scans 20 rows -select * from products order by id limit 20 offset 0; - --- Page 100: scans 2000 rows to skip 1980 -select * from products order by id limit 20 offset 1980; - --- Page 10000: scans 200,000 rows! -select * from products order by id limit 20 offset 199980; -``` - -**Correct (cursor/keyset pagination):** - -```sql --- Page 1: get first 20 -select * from products order by id limit 20; --- Application stores last_id = 20 - --- Page 2: start after last ID -select * from products where id > 20 order by id limit 20; --- Uses index, always fast regardless of page depth - --- Page 10000: same speed as page 1 -select * from products where id > 199980 order by id limit 20; -``` - -For multi-column sorting: - -```sql --- Cursor must include all sort columns -select * from products -where (created_at, id) > ('2024-01-15 10:00:00', 12345) -order by created_at, id -limit 20; -``` - -Reference: [Pagination](https://supabase.com/docs/guides/database/pagination) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/data-upsert.md b/.windsurf/skills/supabase-postgres-best-practices/references/data-upsert.md deleted file mode 100644 index bc95e230..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/data-upsert.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Use UPSERT for Insert-or-Update Operations -impact: MEDIUM -impactDescription: Atomic operation, eliminates race conditions -tags: upsert, on-conflict, insert, update ---- - -## Use UPSERT for Insert-or-Update Operations - -Using separate SELECT-then-INSERT/UPDATE creates race conditions. Use INSERT ... 
ON CONFLICT for atomic upserts. - -**Incorrect (check-then-insert race condition):** - -```sql --- Race condition: two requests check simultaneously -select * from settings where user_id = 123 and key = 'theme'; --- Both find nothing - --- Both try to insert -insert into settings (user_id, key, value) values (123, 'theme', 'dark'); --- One succeeds, one fails with duplicate key error! -``` - -**Correct (atomic UPSERT):** - -```sql --- Single atomic operation -insert into settings (user_id, key, value) -values (123, 'theme', 'dark') -on conflict (user_id, key) -do update set value = excluded.value, updated_at = now(); - --- Returns the inserted/updated row -insert into settings (user_id, key, value) -values (123, 'theme', 'dark') -on conflict (user_id, key) -do update set value = excluded.value -returning *; -``` - -Insert-or-ignore pattern: - -```sql --- Insert only if not exists (no update) -insert into page_views (page_id, user_id) -values (1, 123) -on conflict (page_id, user_id) do nothing; -``` - -Reference: [INSERT ON CONFLICT](https://www.postgresql.org/docs/current/sql-insert.html#SQL-ON-CONFLICT) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/lock-advisory.md b/.windsurf/skills/supabase-postgres-best-practices/references/lock-advisory.md deleted file mode 100644 index 572eaf0d..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/lock-advisory.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Use Advisory Locks for Application-Level Locking -impact: MEDIUM -impactDescription: Efficient coordination without row-level lock overhead -tags: advisory-locks, coordination, application-locks ---- - -## Use Advisory Locks for Application-Level Locking - -Advisory locks provide application-level coordination without requiring database rows to lock. - -**Incorrect (creating rows just for locking):** - -```sql --- Creating dummy rows to lock on -create table resource_locks ( - resource_name text primary key -); - -insert into resource_locks values ('report_generator'); - --- Lock by selecting the row -select * from resource_locks where resource_name = 'report_generator' for update; -``` - -**Correct (advisory locks):** - -```sql --- Session-level advisory lock (released on disconnect or unlock) -select pg_advisory_lock(hashtext('report_generator')); --- ... do exclusive work ... -select pg_advisory_unlock(hashtext('report_generator')); - --- Transaction-level lock (released on commit/rollback) -begin; -select pg_advisory_xact_lock(hashtext('daily_report')); --- ... do work ... 
-commit; -- Lock automatically released -``` - -Try-lock for non-blocking operations: - -```sql --- Returns immediately with true/false instead of waiting -select pg_try_advisory_lock(hashtext('resource_name')); - --- Use in application -if (acquired) { - -- Do work - select pg_advisory_unlock(hashtext('resource_name')); -} else { - -- Skip or retry later -} -``` - -Reference: [Advisory Locks](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md b/.windsurf/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md deleted file mode 100644 index 974da5ed..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Prevent Deadlocks with Consistent Lock Ordering -impact: MEDIUM-HIGH -impactDescription: Eliminate deadlock errors, improve reliability -tags: deadlocks, locking, transactions, ordering ---- - -## Prevent Deadlocks with Consistent Lock Ordering - -Deadlocks occur when transactions lock resources in different orders. Always -acquire locks in a consistent order. - -**Incorrect (inconsistent lock ordering):** - -```sql --- Transaction A -- Transaction B -begin; begin; -update accounts update accounts -set balance = balance - 100 set balance = balance - 50 -where id = 1; where id = 2; -- B locks row 2 - -update accounts update accounts -set balance = balance + 100 set balance = balance + 50 -where id = 2; -- A waits for B where id = 1; -- B waits for A - --- DEADLOCK! Both waiting for each other -``` - -**Correct (lock rows in consistent order first):** - -```sql --- Explicitly acquire locks in ID order before updating -begin; -select * from accounts where id in (1, 2) order by id for update; - --- Now perform updates in any order - locks already held -update accounts set balance = balance - 100 where id = 1; -update accounts set balance = balance + 100 where id = 2; -commit; -``` - -Alternative: use a single statement to update atomically: - -```sql --- Single statement acquires all locks atomically -begin; -update accounts -set balance = balance + case id - when 1 then -100 - when 2 then 100 -end -where id in (1, 2); -commit; -``` - -Detect deadlocks in logs: - -```sql --- Check for recent deadlocks -select * from pg_stat_database where deadlocks > 0; - --- Enable deadlock logging -set log_lock_waits = on; -set deadlock_timeout = '1s'; -``` - -Reference: -[Deadlocks](https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-DEADLOCKS) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/lock-short-transactions.md b/.windsurf/skills/supabase-postgres-best-practices/references/lock-short-transactions.md deleted file mode 100644 index e6b8ef26..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/lock-short-transactions.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Keep Transactions Short to Reduce Lock Contention -impact: MEDIUM-HIGH -impactDescription: 3-5x throughput improvement, fewer deadlocks -tags: transactions, locking, contention, performance ---- - -## Keep Transactions Short to Reduce Lock Contention - -Long-running transactions hold locks that block other queries. Keep transactions as short as possible. 
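Long-lived transactions are easiest to spot from `pg_stat_activity`; a sketch (the one-minute threshold is arbitrary):

```sql
-- Transactions that have been open longer than one minute, oldest first
select pid, now() - xact_start as xact_age, state, left(query, 60) as query
from pg_stat_activity
where xact_start is not null
  and now() - xact_start > interval '1 minute'
order by xact_age desc;
```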
- -**Incorrect (long transaction with external calls):** - -```sql -begin; -select * from orders where id = 1 for update; -- Lock acquired - --- Application makes HTTP call to payment API (2-5 seconds) --- Other queries on this row are blocked! - -update orders set status = 'paid' where id = 1; -commit; -- Lock held for entire duration -``` - -**Correct (minimal transaction scope):** - -```sql --- Validate data and call APIs outside transaction --- Application: response = await paymentAPI.charge(...) - --- Only hold lock for the actual update -begin; -update orders -set status = 'paid', payment_id = $1 -where id = $2 and status = 'pending' -returning *; -commit; -- Lock held for milliseconds -``` - -Use `statement_timeout` to prevent runaway transactions: - -```sql --- Abort queries running longer than 30 seconds -set statement_timeout = '30s'; - --- Or per-session -set local statement_timeout = '5s'; -``` - -Reference: [Transaction Management](https://www.postgresql.org/docs/current/tutorial-transactions.html) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/lock-skip-locked.md b/.windsurf/skills/supabase-postgres-best-practices/references/lock-skip-locked.md deleted file mode 100644 index 77bdbb97..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/lock-skip-locked.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Use SKIP LOCKED for Non-Blocking Queue Processing -impact: MEDIUM-HIGH -impactDescription: 10x throughput for worker queues -tags: skip-locked, queue, workers, concurrency ---- - -## Use SKIP LOCKED for Non-Blocking Queue Processing - -When multiple workers process a queue, SKIP LOCKED allows workers to process different rows without waiting. - -**Incorrect (workers block each other):** - -```sql --- Worker 1 and Worker 2 both try to get next job -begin; -select * from jobs where status = 'pending' order by created_at limit 1 for update; --- Worker 2 waits for Worker 1's lock to release! -``` - -**Correct (SKIP LOCKED for parallel processing):** - -```sql --- Each worker skips locked rows and gets the next available -begin; -select * from jobs -where status = 'pending' -order by created_at -limit 1 -for update skip locked; - --- Worker 1 gets job 1, Worker 2 gets job 2 (no waiting) - -update jobs set status = 'processing' where id = $1; -commit; -``` - -Complete queue pattern: - -```sql --- Atomic claim-and-update in one statement -update jobs -set status = 'processing', worker_id = $1, started_at = now() -where id = ( - select id from jobs - where status = 'pending' - order by created_at - limit 1 - for update skip locked -) -returning *; -``` - -Reference: [SELECT FOR UPDATE SKIP LOCKED](https://www.postgresql.org/docs/current/sql-select.html#SQL-FOR-UPDATE-SHARE) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md b/.windsurf/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md deleted file mode 100644 index 542978c3..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Use EXPLAIN ANALYZE to Diagnose Slow Queries -impact: LOW-MEDIUM -impactDescription: Identify exact bottlenecks in query execution -tags: explain, analyze, diagnostics, query-plan ---- - -## Use EXPLAIN ANALYZE to Diagnose Slow Queries - -EXPLAIN ANALYZE executes the query and shows actual timings, revealing the true performance bottlenecks. 
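Because EXPLAIN ANALYZE really executes the statement, analyzing a data-modifying query is safest inside a transaction that is rolled back afterwards (a sketch; the DELETE is illustrative):

```sql
begin;
explain analyze
delete from orders where status = 'cancelled';
rollback;  -- the delete executed for timing purposes is undone
```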
- -**Incorrect (guessing at performance issues):** - -```sql --- Query is slow, but why? -select * from orders where customer_id = 123 and status = 'pending'; --- "It must be missing an index" - but which one? -``` - -**Correct (use EXPLAIN ANALYZE):** - -```sql -explain (analyze, buffers, format text) -select * from orders where customer_id = 123 and status = 'pending'; - --- Output reveals the issue: --- Seq Scan on orders (cost=0.00..25000.00 rows=50 width=100) (actual time=0.015..450.123 rows=50 loops=1) --- Filter: ((customer_id = 123) AND (status = 'pending'::text)) --- Rows Removed by Filter: 999950 --- Buffers: shared hit=5000 read=15000 --- Planning Time: 0.150 ms --- Execution Time: 450.500 ms -``` - -Key things to look for: - -```sql --- Seq Scan on large tables = missing index --- Rows Removed by Filter = poor selectivity or missing index --- Buffers: read >> hit = data not cached, needs more memory --- Nested Loop with high loops = consider different join strategy --- Sort Method: external merge = work_mem too low -``` - -Reference: [EXPLAIN](https://supabase.com/docs/guides/database/inspect) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md b/.windsurf/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md deleted file mode 100644 index d7e82f1a..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Enable pg_stat_statements for Query Analysis -impact: LOW-MEDIUM -impactDescription: Identify top resource-consuming queries -tags: pg-stat-statements, monitoring, statistics, performance ---- - -## Enable pg_stat_statements for Query Analysis - -pg_stat_statements tracks execution statistics for all queries, helping identify slow and frequent queries. - -**Incorrect (no visibility into query patterns):** - -```sql --- Database is slow, but which queries are the problem? 
--- No way to know without pg_stat_statements -``` - -**Correct (enable and query pg_stat_statements):** - -```sql --- Enable the extension -create extension if not exists pg_stat_statements; - --- Find slowest queries by total time -select - calls, - round(total_exec_time::numeric, 2) as total_time_ms, - round(mean_exec_time::numeric, 2) as mean_time_ms, - query -from pg_stat_statements -order by total_exec_time desc -limit 10; - --- Find most frequent queries -select calls, query -from pg_stat_statements -order by calls desc -limit 10; - --- Reset statistics after optimization -select pg_stat_statements_reset(); -``` - -Key metrics to monitor: - -```sql --- Queries with high mean time (candidates for optimization) -select query, mean_exec_time, calls -from pg_stat_statements -where mean_exec_time > 100 -- > 100ms average -order by mean_exec_time desc; -``` - -Reference: [pg_stat_statements](https://supabase.com/docs/guides/database/extensions/pg_stat_statements) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md b/.windsurf/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md deleted file mode 100644 index e0e8ea0b..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Maintain Table Statistics with VACUUM and ANALYZE -impact: MEDIUM -impactDescription: 2-10x better query plans with accurate statistics -tags: vacuum, analyze, statistics, maintenance, autovacuum ---- - -## Maintain Table Statistics with VACUUM and ANALYZE - -Outdated statistics cause the query planner to make poor decisions. VACUUM reclaims space, ANALYZE updates statistics. - -**Incorrect (stale statistics):** - -```sql --- Table has 1M rows but stats say 1000 --- Query planner chooses wrong strategy -explain select * from orders where status = 'pending'; --- Shows: Seq Scan (because stats show small table) --- Actually: Index Scan would be much faster -``` - -**Correct (maintain fresh statistics):** - -```sql --- Manually analyze after large data changes -analyze orders; - --- Analyze specific columns used in WHERE clauses -analyze orders (status, created_at); - --- Check when tables were last analyzed -select - relname, - last_vacuum, - last_autovacuum, - last_analyze, - last_autoanalyze -from pg_stat_user_tables -order by last_analyze nulls first; -``` - -Autovacuum tuning for busy tables: - -```sql --- Increase frequency for high-churn tables -alter table orders set ( - autovacuum_vacuum_scale_factor = 0.05, -- Vacuum at 5% dead tuples (default 20%) - autovacuum_analyze_scale_factor = 0.02 -- Analyze at 2% changes (default 10%) -); - --- Check autovacuum status -select * from pg_stat_progress_vacuum; -``` - -Reference: [VACUUM](https://supabase.com/docs/guides/database/database-size#vacuum-operations) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/query-composite-indexes.md b/.windsurf/skills/supabase-postgres-best-practices/references/query-composite-indexes.md deleted file mode 100644 index fea64523..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/query-composite-indexes.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Create Composite Indexes for Multi-Column Queries -impact: HIGH -impactDescription: 5-10x faster multi-column queries -tags: indexes, composite-index, multi-column, query-optimization ---- - -## Create Composite Indexes for Multi-Column Queries - -When queries filter on multiple 
columns, a composite index is more efficient than separate single-column indexes. - -**Incorrect (separate indexes require bitmap scan):** - -```sql --- Two separate indexes -create index orders_status_idx on orders (status); -create index orders_created_idx on orders (created_at); - --- Query must combine both indexes (slower) -select * from orders where status = 'pending' and created_at > '2024-01-01'; -``` - -**Correct (composite index):** - -```sql --- Single composite index (leftmost column first for equality checks) -create index orders_status_created_idx on orders (status, created_at); - --- Query uses one efficient index scan -select * from orders where status = 'pending' and created_at > '2024-01-01'; -``` - -**Column order matters** - place equality columns first, range columns last: - -```sql --- Good: status (=) before created_at (>) -create index idx on orders (status, created_at); - --- Works for: WHERE status = 'pending' --- Works for: WHERE status = 'pending' AND created_at > '2024-01-01' --- Does NOT work for: WHERE created_at > '2024-01-01' (leftmost prefix rule) -``` - -Reference: [Multicolumn Indexes](https://www.postgresql.org/docs/current/indexes-multicolumn.html) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/query-covering-indexes.md b/.windsurf/skills/supabase-postgres-best-practices/references/query-covering-indexes.md deleted file mode 100644 index 9d2a4947..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/query-covering-indexes.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Use Covering Indexes to Avoid Table Lookups -impact: MEDIUM-HIGH -impactDescription: 2-5x faster queries by eliminating heap fetches -tags: indexes, covering-index, include, index-only-scan ---- - -## Use Covering Indexes to Avoid Table Lookups - -Covering indexes include all columns needed by a query, enabling index-only scans that skip the table entirely. - -**Incorrect (index scan + heap fetch):** - -```sql -create index users_email_idx on users (email); - --- Must fetch name and created_at from table heap -select email, name, created_at from users where email = 'user@example.com'; -``` - -**Correct (index-only scan with INCLUDE):** - -```sql --- Include non-searchable columns in the index -create index users_email_idx on users (email) include (name, created_at); - --- All columns served from index, no table access needed -select email, name, created_at from users where email = 'user@example.com'; -``` - -Use INCLUDE for columns you SELECT but don't filter on: - -```sql --- Searching by status, but also need customer_id and total -create index orders_status_idx on orders (status) include (customer_id, total); - -select status, customer_id, total from orders where status = 'shipped'; -``` - -Reference: [Index-Only Scans](https://www.postgresql.org/docs/current/indexes-index-only-scans.html) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/query-index-types.md b/.windsurf/skills/supabase-postgres-best-practices/references/query-index-types.md deleted file mode 100644 index 93b32590..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/query-index-types.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Choose the Right Index Type for Your Data -impact: HIGH -impactDescription: 10-100x improvement with correct index type -tags: indexes, btree, gin, gist, brin, hash, index-types ---- - -## Choose the Right Index Type for Your Data - -Different index types excel at different query patterns. 
The default B-tree isn't always optimal. - -**Incorrect (B-tree for JSONB containment):** - -```sql --- B-tree cannot optimize containment operators -create index products_attrs_idx on products (attributes); -select * from products where attributes @> '{"color": "red"}'; --- Full table scan - B-tree doesn't support @> operator -``` - -**Correct (GIN for JSONB):** - -```sql --- GIN supports @>, ?, ?&, ?| operators -create index products_attrs_idx on products using gin (attributes); -select * from products where attributes @> '{"color": "red"}'; -``` - -Index type guide: - -```sql --- B-tree (default): =, <, >, BETWEEN, IN, IS NULL -create index users_created_idx on users (created_at); - --- GIN: arrays, JSONB, full-text search -create index posts_tags_idx on posts using gin (tags); - --- GiST: geometric data, range types, nearest-neighbor (KNN) queries -create index locations_idx on places using gist (location); - --- BRIN: large time-series tables (10-100x smaller) -create index events_time_idx on events using brin (created_at); - --- Hash: equality-only (slightly faster than B-tree for =) -create index sessions_token_idx on sessions using hash (token); -``` - -Reference: [Index Types](https://www.postgresql.org/docs/current/indexes-types.html) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/query-missing-indexes.md b/.windsurf/skills/supabase-postgres-best-practices/references/query-missing-indexes.md deleted file mode 100644 index e6daace7..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/query-missing-indexes.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Add Indexes on WHERE and JOIN Columns -impact: CRITICAL -impactDescription: 100-1000x faster queries on large tables -tags: indexes, performance, sequential-scan, query-optimization ---- - -## Add Indexes on WHERE and JOIN Columns - -Queries filtering or joining on unindexed columns cause full table scans, which become exponentially slower as tables grow. 
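The usual fix, shown in the examples below, is a single index on the filtered or joined column; on a table that is already serving traffic, the index can be built without blocking writes (a sketch; names are illustrative):

```sql
-- Does not take a write-blocking lock, but cannot run inside a transaction block
create index concurrently orders_customer_id_idx on orders (customer_id);
```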
- -**Incorrect (sequential scan on large table):** - -```sql --- No index on customer_id causes full table scan -select * from orders where customer_id = 123; - --- EXPLAIN shows: Seq Scan on orders (cost=0.00..25000.00 rows=100 width=85) -``` - -**Correct (index scan):** - -```sql --- Create index on frequently filtered column -create index orders_customer_id_idx on orders (customer_id); - -select * from orders where customer_id = 123; - --- EXPLAIN shows: Index Scan using orders_customer_id_idx (cost=0.42..8.44 rows=100 width=85) -``` - -For JOIN columns, always index the foreign key side: - -```sql --- Index the referencing column -create index orders_customer_id_idx on orders (customer_id); - -select c.name, o.total -from customers c -join orders o on o.customer_id = c.id; -``` - -Reference: [Query Optimization](https://supabase.com/docs/guides/database/query-optimization) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/query-partial-indexes.md b/.windsurf/skills/supabase-postgres-best-practices/references/query-partial-indexes.md deleted file mode 100644 index 3e61a341..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/query-partial-indexes.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Use Partial Indexes for Filtered Queries -impact: HIGH -impactDescription: 5-20x smaller indexes, faster writes and queries -tags: indexes, partial-index, query-optimization, storage ---- - -## Use Partial Indexes for Filtered Queries - -Partial indexes only include rows matching a WHERE condition, making them smaller and faster when queries consistently filter on the same condition. - -**Incorrect (full index includes irrelevant rows):** - -```sql --- Index includes all rows, even soft-deleted ones -create index users_email_idx on users (email); - --- Query always filters active users -select * from users where email = 'user@example.com' and deleted_at is null; -``` - -**Correct (partial index matches query filter):** - -```sql --- Index only includes active users -create index users_active_email_idx on users (email) -where deleted_at is null; - --- Query uses the smaller, faster index -select * from users where email = 'user@example.com' and deleted_at is null; -``` - -Common use cases for partial indexes: - -```sql --- Only pending orders (status rarely changes once completed) -create index orders_pending_idx on orders (created_at) -where status = 'pending'; - --- Only non-null values -create index products_sku_idx on products (sku) -where sku is not null; -``` - -Reference: [Partial Indexes](https://www.postgresql.org/docs/current/indexes-partial.html) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/schema-constraints.md b/.windsurf/skills/supabase-postgres-best-practices/references/schema-constraints.md deleted file mode 100644 index 1d2ef8f9..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/schema-constraints.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Add Constraints Safely in Migrations -impact: HIGH -impactDescription: Prevents migration failures and enables idempotent schema changes -tags: constraints, migrations, schema, alter-table ---- - -## Add Constraints Safely in Migrations - -PostgreSQL does not support `ADD CONSTRAINT IF NOT EXISTS`. Migrations using this syntax will fail. 
- -**Incorrect (causes syntax error):** - -```sql --- ERROR: syntax error at or near "not" (SQLSTATE 42601) -alter table public.profiles -add constraint if not exists profiles_birthchart_id_unique unique (birthchart_id); -``` - -**Correct (idempotent constraint creation):** - -```sql --- Use DO block to check before adding -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'profiles_birthchart_id_unique' - and conrelid = 'public.profiles'::regclass - ) then - alter table public.profiles - add constraint profiles_birthchart_id_unique unique (birthchart_id); - end if; -end $$; -``` - -For all constraint types: - -```sql --- Check constraints -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'check_age_positive' - ) then - alter table users add constraint check_age_positive check (age > 0); - end if; -end $$; - --- Foreign keys -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'profiles_birthchart_id_fkey' - ) then - alter table profiles - add constraint profiles_birthchart_id_fkey - foreign key (birthchart_id) references birthcharts(id); - end if; -end $$; -``` - -Check if constraint exists: - -```sql --- Query to check constraint existence -select conname, contype, pg_get_constraintdef(oid) -from pg_constraint -where conrelid = 'public.profiles'::regclass; - --- contype values: --- 'p' = PRIMARY KEY --- 'f' = FOREIGN KEY --- 'u' = UNIQUE --- 'c' = CHECK -``` - -Reference: [Constraints](https://www.postgresql.org/docs/current/ddl-constraints.html) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/schema-data-types.md b/.windsurf/skills/supabase-postgres-best-practices/references/schema-data-types.md deleted file mode 100644 index f253a581..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/schema-data-types.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Choose Appropriate Data Types -impact: HIGH -impactDescription: 50% storage reduction, faster comparisons -tags: data-types, schema, storage, performance ---- - -## Choose Appropriate Data Types - -Using the right data types reduces storage, improves query performance, and prevents bugs. 
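For enum-like columns, one option the guidelines below mention without showing is a plain `text` column constrained by a check, which validates values while staying easy to evolve (a sketch; the status values are illustrative):

```sql
create table orders (
  id bigint generated always as identity primary key,
  -- text + check instead of a dedicated enum type; adding a value is a constraint change, not a type change
  status text not null default 'pending'
    check (status in ('pending', 'paid', 'shipped', 'cancelled'))
);
```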
- -**Incorrect (wrong data types):** - -```sql -create table users ( - id int, -- Will overflow at 2.1 billion - email varchar(255), -- Unnecessary length limit - created_at timestamp, -- Missing timezone info - is_active varchar(5), -- String for boolean - price varchar(20) -- String for numeric -); -``` - -**Correct (appropriate data types):** - -```sql -create table users ( - id bigint generated always as identity primary key, -- 9 quintillion max - email text, -- No artificial limit, same performance as varchar - created_at timestamptz, -- Always store timezone-aware timestamps - is_active boolean default true, -- 1 byte vs variable string length - price numeric(10,2) -- Exact decimal arithmetic -); -``` - -Key guidelines: - -```sql --- IDs: use bigint, not int (future-proofing) --- Strings: use text, not varchar(n) unless constraint needed --- Time: use timestamptz, not timestamp --- Money: use numeric, not float (precision matters) --- Enums: use text with check constraint or create enum type -``` - -Reference: [Data Types](https://www.postgresql.org/docs/current/datatype.html) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md b/.windsurf/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md deleted file mode 100644 index 6c3d6ff6..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Index Foreign Key Columns -impact: HIGH -impactDescription: 10-100x faster JOINs and CASCADE operations -tags: foreign-key, indexes, joins, schema ---- - -## Index Foreign Key Columns - -Postgres does not automatically index foreign key columns. Missing indexes cause slow JOINs and CASCADE operations. - -**Incorrect (unindexed foreign key):** - -```sql -create table orders ( - id bigint generated always as identity primary key, - customer_id bigint references customers(id) on delete cascade, - total numeric(10,2) -); - --- No index on customer_id! 
--- JOINs and ON DELETE CASCADE both require full table scan -select * from orders where customer_id = 123; -- Seq Scan -delete from customers where id = 123; -- Locks table, scans all orders -``` - -**Correct (indexed foreign key):** - -```sql -create table orders ( - id bigint generated always as identity primary key, - customer_id bigint references customers(id) on delete cascade, - total numeric(10,2) -); - --- Always index the FK column -create index orders_customer_id_idx on orders (customer_id); - --- Now JOINs and cascades are fast -select * from orders where customer_id = 123; -- Index Scan -delete from customers where id = 123; -- Uses index, fast cascade -``` - -Find missing FK indexes: - -```sql -select - conrelid::regclass as table_name, - a.attname as fk_column -from pg_constraint c -join pg_attribute a on a.attrelid = c.conrelid and a.attnum = any(c.conkey) -where c.contype = 'f' - and not exists ( - select 1 from pg_index i - where i.indrelid = c.conrelid and a.attnum = any(i.indkey) - ); -``` - -Reference: [Foreign Keys](https://www.postgresql.org/docs/current/ddl-constraints.html#DDL-CONSTRAINTS-FK) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md b/.windsurf/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md deleted file mode 100644 index f0072940..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Use Lowercase Identifiers for Compatibility -impact: MEDIUM -impactDescription: Avoid case-sensitivity bugs with tools, ORMs, and AI assistants -tags: naming, identifiers, case-sensitivity, schema, conventions ---- - -## Use Lowercase Identifiers for Compatibility - -PostgreSQL folds unquoted identifiers to lowercase. Quoted mixed-case identifiers require quotes forever and cause issues with tools, ORMs, and AI assistants that may not recognize them. 
- -**Incorrect (mixed-case identifiers):** - -```sql --- Quoted identifiers preserve case but require quotes everywhere -CREATE TABLE "Users" ( - "userId" bigint PRIMARY KEY, - "firstName" text, - "lastName" text -); - --- Must always quote or queries fail -SELECT "firstName" FROM "Users" WHERE "userId" = 1; - --- This fails - Users becomes users without quotes -SELECT firstName FROM Users; --- ERROR: relation "users" does not exist -``` - -**Correct (lowercase snake_case):** - -```sql --- Unquoted lowercase identifiers are portable and tool-friendly -CREATE TABLE users ( - user_id bigint PRIMARY KEY, - first_name text, - last_name text -); - --- Works without quotes, recognized by all tools -SELECT first_name FROM users WHERE user_id = 1; -``` - -Common sources of mixed-case identifiers: - -```sql --- ORMs often generate quoted camelCase - configure them to use snake_case --- Migrations from other databases may preserve original casing --- Some GUI tools quote identifiers by default - disable this - --- If stuck with mixed-case, create views as a compatibility layer -CREATE VIEW users AS SELECT "userId" AS user_id, "firstName" AS first_name FROM "Users"; -``` - -Reference: [Identifiers and Key Words](https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/schema-partitioning.md b/.windsurf/skills/supabase-postgres-best-practices/references/schema-partitioning.md deleted file mode 100644 index 13137a03..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/schema-partitioning.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Partition Large Tables for Better Performance -impact: MEDIUM-HIGH -impactDescription: 5-20x faster queries and maintenance on large tables -tags: partitioning, large-tables, time-series, performance ---- - -## Partition Large Tables for Better Performance - -Partitioning splits a large table into smaller pieces, improving query performance and maintenance operations. 
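Alongside the range partitions shown below, a default partition is worth adding so that rows falling outside every defined range are stored instead of rejected (a sketch; the table name mirrors the examples that follow):

```sql
-- Catch-all partition for out-of-range rows (PostgreSQL 11+)
create table events_default partition of events default;
```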
- -**Incorrect (single large table):** - -```sql -create table events ( - id bigint generated always as identity, - created_at timestamptz, - data jsonb -); - --- 500M rows, queries scan everything -select * from events where created_at > '2024-01-01'; -- Slow -vacuum events; -- Takes hours, locks table -``` - -**Correct (partitioned by time range):** - -```sql -create table events ( - id bigint generated always as identity, - created_at timestamptz not null, - data jsonb -) partition by range (created_at); - --- Create partitions for each month -create table events_2024_01 partition of events - for values from ('2024-01-01') to ('2024-02-01'); - -create table events_2024_02 partition of events - for values from ('2024-02-01') to ('2024-03-01'); - --- Queries only scan relevant partitions -select * from events where created_at > '2024-01-15'; -- Only scans events_2024_01+ - --- Drop old data instantly -drop table events_2023_01; -- Instant vs DELETE taking hours -``` - -When to partition: - -- Tables > 100M rows -- Time-series data with date-based queries -- Need to efficiently drop old data - -Reference: [Table Partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/schema-primary-keys.md b/.windsurf/skills/supabase-postgres-best-practices/references/schema-primary-keys.md deleted file mode 100644 index fb0fbb16..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/schema-primary-keys.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Select Optimal Primary Key Strategy -impact: HIGH -impactDescription: Better index locality, reduced fragmentation -tags: primary-key, identity, uuid, serial, schema ---- - -## Select Optimal Primary Key Strategy - -Primary key choice affects insert performance, index size, and replication -efficiency. 
- -**Incorrect (problematic PK choices):** - -```sql --- identity is the SQL-standard approach -create table users ( - id serial primary key -- Works, but IDENTITY is recommended -); - --- Random UUIDs (v4) cause index fragmentation -create table orders ( - id uuid default gen_random_uuid() primary key -- UUIDv4 = random = scattered inserts -); -``` - -**Correct (optimal PK strategies):** - -```sql --- Use IDENTITY for sequential IDs (SQL-standard, best for most cases) -create table users ( - id bigint generated always as identity primary key -); - --- For distributed systems needing UUIDs, use UUIDv7 (time-ordered) --- Requires pg_uuidv7 extension: create extension pg_uuidv7; -create table orders ( - id uuid default uuid_generate_v7() primary key -- Time-ordered, no fragmentation -); - --- Alternative: time-prefixed IDs for sortable, distributed IDs (no extension needed) -create table events ( - id text default concat( - to_char(now() at time zone 'utc', 'YYYYMMDDHH24MISSMS'), - gen_random_uuid()::text - ) primary key -); -``` - -Guidelines: - -- Single database: `bigint identity` (sequential, 8 bytes, SQL-standard) -- Distributed/exposed IDs: UUIDv7 (requires pg_uuidv7) or ULID (time-ordered, no - fragmentation) -- `serial` works but `identity` is SQL-standard and preferred for new - applications -- Avoid random UUIDs (v4) as primary keys on large tables (causes index - fragmentation) - -Reference: -[Identity Columns](https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-PARMS-GENERATED-IDENTITY) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/security-privileges.md b/.windsurf/skills/supabase-postgres-best-practices/references/security-privileges.md deleted file mode 100644 index 448ec345..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/security-privileges.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Apply Principle of Least Privilege -impact: MEDIUM -impactDescription: Reduced attack surface, better audit trail -tags: privileges, security, roles, permissions ---- - -## Apply Principle of Least Privilege - -Grant only the minimum permissions required. Never use superuser for application queries. 
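Grants only cover objects that already exist; for tables created later, default privileges keep the same policy in force (a sketch using the role names from the examples below):

```sql
-- Future tables in public are readable by app_readonly without a new GRANT
alter default privileges in schema public
  grant select on tables to app_readonly;

-- Future tables get insert/update (still no delete) for app_writer
alter default privileges in schema public
  grant select, insert, update on tables to app_writer;

-- Note: applies to objects created by the role running this; use FOR ROLE <owner> if another role creates tables
```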
- -**Incorrect (overly broad permissions):** - -```sql --- Application uses superuser connection --- Or grants ALL to application role -grant all privileges on all tables in schema public to app_user; -grant all privileges on all sequences in schema public to app_user; - --- Any SQL injection becomes catastrophic --- drop table users; cascades to everything -``` - -**Correct (minimal, specific grants):** - -```sql --- Create role with no default privileges -create role app_readonly nologin; - --- Grant only SELECT on specific tables -grant usage on schema public to app_readonly; -grant select on public.products, public.categories to app_readonly; - --- Create role for writes with limited scope -create role app_writer nologin; -grant usage on schema public to app_writer; -grant select, insert, update on public.orders to app_writer; -grant usage on sequence orders_id_seq to app_writer; --- No DELETE permission - --- Login role inherits from these -create role app_user login password 'xxx'; -grant app_writer to app_user; -``` - -Revoke public defaults: - -```sql --- Revoke default public access -revoke all on schema public from public; -revoke all on all tables in schema public from public; -``` - -Reference: [Roles and Privileges](https://supabase.com/blog/postgres-roles-and-privileges) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/security-rls-basics.md b/.windsurf/skills/supabase-postgres-best-practices/references/security-rls-basics.md deleted file mode 100644 index c61e1a85..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/security-rls-basics.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Enable Row Level Security for Multi-Tenant Data -impact: CRITICAL -impactDescription: Database-enforced tenant isolation, prevent data leaks -tags: rls, row-level-security, multi-tenant, security ---- - -## Enable Row Level Security for Multi-Tenant Data - -Row Level Security (RLS) enforces data access at the database level, ensuring users only see their own data. - -**Incorrect (application-level filtering only):** - -```sql --- Relying only on application to filter -select * from orders where user_id = $current_user_id; - --- Bug or bypass means all data is exposed! 
-select * from orders; -- Returns ALL orders -``` - -**Correct (database-enforced RLS):** - -```sql --- Enable RLS on the table -alter table orders enable row level security; - --- Create policy for users to see only their orders -create policy orders_user_policy on orders - for all - using (user_id = current_setting('app.current_user_id')::bigint); - --- Force RLS even for table owners -alter table orders force row level security; - --- Set user context and query -set app.current_user_id = '123'; -select * from orders; -- Only returns orders for user 123 -``` - -Policy for authenticated role: - -```sql -create policy orders_user_policy on orders - for all - to authenticated - using (user_id = auth.uid()); -``` - -Reference: [Row Level Security](https://supabase.com/docs/guides/database/postgres/row-level-security) diff --git a/.windsurf/skills/supabase-postgres-best-practices/references/security-rls-performance.md b/.windsurf/skills/supabase-postgres-best-practices/references/security-rls-performance.md deleted file mode 100644 index b32d92f7..00000000 --- a/.windsurf/skills/supabase-postgres-best-practices/references/security-rls-performance.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Optimize RLS Policies for Performance -impact: HIGH -impactDescription: 5-10x faster RLS queries with proper patterns -tags: rls, performance, security, optimization ---- - -## Optimize RLS Policies for Performance - -Poorly written RLS policies can cause severe performance issues. Use subqueries and indexes strategically. - -**Incorrect (function called for every row):** - -```sql -create policy orders_policy on orders - using (auth.uid() = user_id); -- auth.uid() called per row! - --- With 1M rows, auth.uid() is called 1M times -``` - -**Correct (wrap functions in SELECT):** - -```sql -create policy orders_policy on orders - using ((select auth.uid()) = user_id); -- Called once, cached - --- 100x+ faster on large tables -``` - -Use security definer functions for complex checks: - -```sql --- Create helper function (runs as definer, bypasses RLS) -create or replace function is_team_member(team_id bigint) -returns boolean -language sql -security definer -set search_path = '' -as $$ - select exists ( - select 1 from public.team_members - where team_id = $1 and user_id = (select auth.uid()) - ); -$$; - --- Use in policy (indexed lookup, not per-row check) -create policy team_orders_policy on orders - using ((select is_team_member(team_id))); -``` - -Always add indexes on columns used in RLS policies: - -```sql -create index orders_user_id_idx on orders (user_id); -``` - -Reference: [RLS Performance](https://supabase.com/docs/guides/database/postgres/row-level-security#rls-performance-recommendations) diff --git a/.zencoder/rules/repo.md b/.zencoder/rules/repo.md deleted file mode 100644 index a0b9c46b..00000000 --- a/.zencoder/rules/repo.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -description: Repository Information Overview -alwaysApply: true ---- - -# TradeHax Information - -## Summary -TradeHax is a Web3-focused project aimed at providing AI insights, portfolio tools, and NFT gaming. It leverages Solana RPC integration and is designed for crypto-native users. The project draws inspiration from Tesla and xAI's minimalist, tech-forward aesthetics, prioritizing dark themes, immersive visuals, and AI-driven interactive elements. - -## Structure -- **lib/**: Contains core utilities and logic, such as security and rate limiting. 
-- **.zencoder/ / .zenflow/**: Configuration and workflow definitions for AI-assisted development. -- **Root**: Contains project reports, deployment readiness checklists, and strategy documents. - -## Language & Runtime -**Language**: TypeScript -**Version**: ESNext (implied by Next.js usage) -**Build System**: Next.js (planned/targeted) -**Package Manager**: npm/pnpm (implied by Node.js environment) - -## Dependencies -**Main Dependencies**: -- `next`: Web framework (referenced in `lib/security.ts` and docs) -- `solana/web3.js`: Blockchain integration (planned) -- `three.js`: WebGL visuals for immersive elements (planned) -- `tailwind-merge` / `clsx`: Aesthetic styling (referenced in docs) - -**Development Dependencies**: -- `typescript`: Type safety -- `tailwindcss`: Utility-first CSS framework - -## Build & Installation -```bash -# Typical installation for this project type -npm install -# Typical development command -npm run dev -# Typical build command -npm run build -``` - -## Usage & Operations -**Key Strategies**: -- **Monetization**: Freemium models with AI insights, affiliate partnerships, and in-app sales for NFT gaming. -- **Deployment**: Targeted for Vercel with Namecheap DNS configuration (A record to 76.76.21.21). - -## Testing -**Framework**: Jest or Vitest (typical for Next.js projects) -**Test Location**: Likely `__tests__` or `*.test.ts` files within components/lib. -**Run Command**: -```bash -npm test -``` - -## Security -**Features**: -- **Rate Limiting**: Implemented in `lib/security.ts` using a memory-based store with support for custom limits and trusted origin verification. -- **Validation**: Sanitization of plain text and validation of ISO date strings and numbers. diff --git a/.zencoder/skills/supabase-postgres-best-practices/AGENTS.md b/.zencoder/skills/supabase-postgres-best-practices/AGENTS.md deleted file mode 100644 index a7baf445..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/AGENTS.md +++ /dev/null @@ -1,68 +0,0 @@ -# Supabase Postgres Best Practices - -## Structure - -``` -supabase-postgres-best-practices/ - SKILL.md # Main skill file - read this first - AGENTS.md # This navigation guide - CLAUDE.md # Symlink to AGENTS.md - references/ # Detailed reference files -``` - -## Usage - -1. Read `SKILL.md` for the main skill instructions -2. Browse `references/` for detailed documentation on specific topics -3. Reference files are loaded on-demand - read only what you need - -Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. 
- -## When to Apply - -Reference these guidelines when: -- Writing SQL queries or designing schemas -- Implementing indexes or query optimization -- Reviewing database performance issues -- Configuring connection pooling or scaling -- Optimizing for Postgres-specific features -- Working with Row-Level Security (RLS) - -## Rule Categories by Priority - -| Priority | Category | Impact | Prefix | -|----------|----------|--------|--------| -| 1 | Query Performance | CRITICAL | `query-` | -| 2 | Connection Management | CRITICAL | `conn-` | -| 3 | Security & RLS | CRITICAL | `security-` | -| 4 | Schema Design | HIGH | `schema-` | -| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | -| 6 | Data Access Patterns | MEDIUM | `data-` | -| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | -| 8 | Advanced Features | LOW | `advanced-` | - -## How to Use - -Read individual rule files for detailed explanations and SQL examples: - -``` -references/query-missing-indexes.md -references/schema-partial-indexes.md -references/_sections.md -``` - -Each rule file contains: -- Brief explanation of why it matters -- Incorrect SQL example with explanation -- Correct SQL example with explanation -- Optional EXPLAIN output or metrics -- Additional context and references -- Supabase-specific notes (when applicable) - -## References - -- https://www.postgresql.org/docs/current/ -- https://supabase.com/docs -- https://wiki.postgresql.org/wiki/Performance_Optimization -- https://supabase.com/docs/guides/database/overview -- https://supabase.com/docs/guides/auth/row-level-security diff --git a/.zencoder/skills/supabase-postgres-best-practices/CLAUDE.md b/.zencoder/skills/supabase-postgres-best-practices/CLAUDE.md deleted file mode 100644 index 47dc3e3d..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/CLAUDE.md +++ /dev/null @@ -1 +0,0 @@ -AGENTS.md \ No newline at end of file diff --git a/.zencoder/skills/supabase-postgres-best-practices/README.md b/.zencoder/skills/supabase-postgres-best-practices/README.md deleted file mode 100644 index f1a374e1..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# Supabase Postgres Best Practices - Contributor Guide - -This skill contains Postgres performance optimization references optimized for -AI agents and LLMs. It follows the [Agent Skills Open Standard](https://agentskills.io/). - -## Quick Start - -```bash -# From repository root -npm install - -# Validate existing references -npm run validate - -# Build AGENTS.md -npm run build -``` - -## Creating a New Reference - -1. **Choose a section prefix** based on the category: - - `query-` Query Performance (CRITICAL) - - `conn-` Connection Management (CRITICAL) - - `security-` Security & RLS (CRITICAL) - - `schema-` Schema Design (HIGH) - - `lock-` Concurrency & Locking (MEDIUM-HIGH) - - `data-` Data Access Patterns (MEDIUM) - - `monitor-` Monitoring & Diagnostics (LOW-MEDIUM) - - `advanced-` Advanced Features (LOW) - -2. **Copy the template**: - ```bash - cp references/_template.md references/query-your-reference-name.md - ``` - -3. **Fill in the content** following the template structure - -4. **Validate and build**: - ```bash - npm run validate - npm run build - ``` - -5. 
**Review** the generated `AGENTS.md` - -## Skill Structure - -``` -skills/supabase-postgres-best-practices/ -β”œβ”€β”€ SKILL.md # Agent-facing skill manifest (Agent Skills spec) -β”œβ”€β”€ AGENTS.md # [GENERATED] Compiled references document -β”œβ”€β”€ README.md # This file -└── references/ - β”œβ”€β”€ _template.md # Reference template - β”œβ”€β”€ _sections.md # Section definitions - β”œβ”€β”€ _contributing.md # Writing guidelines - └── *.md # Individual references - -packages/skills-build/ -β”œβ”€β”€ src/ # Generic build system source -└── package.json # NPM scripts -``` - -## Reference File Structure - -See `references/_template.md` for the complete template. Key elements: - -````markdown ---- -title: Clear, Action-Oriented Title -impact: CRITICAL|HIGH|MEDIUM-HIGH|MEDIUM|LOW-MEDIUM|LOW -impactDescription: Quantified benefit (e.g., "10-100x faster") -tags: relevant, keywords ---- - -## [Title] - -[1-2 sentence explanation] - -**Incorrect (description):** - -```sql --- Comment explaining what's wrong -[Bad SQL example] -``` -```` - -**Correct (description):** - -```sql --- Comment explaining why this is better -[Good SQL example] -``` - -``` -## Writing Guidelines - -See `references/_contributing.md` for detailed guidelines. Key principles: - -1. **Show concrete transformations** - "Change X to Y", not abstract advice -2. **Error-first structure** - Show the problem before the solution -3. **Quantify impact** - Include specific metrics (10x faster, 50% smaller) -4. **Self-contained examples** - Complete, runnable SQL -5. **Semantic naming** - Use meaningful names (users, email), not (table1, col1) - -## Impact Levels - -| Level | Improvement | Examples | -|-------|-------------|----------| -| CRITICAL | 10-100x | Missing indexes, connection exhaustion | -| HIGH | 5-20x | Wrong index types, poor partitioning | -| MEDIUM-HIGH | 2-5x | N+1 queries, RLS optimization | -| MEDIUM | 1.5-3x | Redundant indexes, stale statistics | -| LOW-MEDIUM | 1.2-2x | VACUUM tuning, config tweaks | -| LOW | Incremental | Advanced patterns, edge cases | -``` diff --git a/.zencoder/skills/supabase-postgres-best-practices/SKILL.md b/.zencoder/skills/supabase-postgres-best-practices/SKILL.md deleted file mode 100644 index f80be156..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/SKILL.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -name: supabase-postgres-best-practices -description: Postgres performance optimization and best practices from Supabase. Use this skill when writing, reviewing, or optimizing Postgres queries, schema designs, or database configurations. -license: MIT -metadata: - author: supabase - version: "1.1.0" - organization: Supabase - date: January 2026 - abstract: Comprehensive Postgres performance optimization guide for developers using Supabase and Postgres. Contains performance rules across 8 categories, prioritized by impact from critical (query performance, connection management) to incremental (advanced features). Each rule includes detailed explanations, incorrect vs. correct SQL examples, query plan analysis, and specific performance metrics to guide automated optimization and code generation. ---- - -# Supabase Postgres Best Practices - -Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. 
- -## When to Apply - -Reference these guidelines when: -- Writing SQL queries or designing schemas -- Implementing indexes or query optimization -- Reviewing database performance issues -- Configuring connection pooling or scaling -- Optimizing for Postgres-specific features -- Working with Row-Level Security (RLS) - -## Rule Categories by Priority - -| Priority | Category | Impact | Prefix | -|----------|----------|--------|--------| -| 1 | Query Performance | CRITICAL | `query-` | -| 2 | Connection Management | CRITICAL | `conn-` | -| 3 | Security & RLS | CRITICAL | `security-` | -| 4 | Schema Design | HIGH | `schema-` | -| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | -| 6 | Data Access Patterns | MEDIUM | `data-` | -| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | -| 8 | Advanced Features | LOW | `advanced-` | - -## How to Use - -Read individual rule files for detailed explanations and SQL examples: - -``` -references/query-missing-indexes.md -references/schema-partial-indexes.md -references/_sections.md -``` - -Each rule file contains: -- Brief explanation of why it matters -- Incorrect SQL example with explanation -- Correct SQL example with explanation -- Optional EXPLAIN output or metrics -- Additional context and references -- Supabase-specific notes (when applicable) - -## References - -- https://www.postgresql.org/docs/current/ -- https://supabase.com/docs -- https://wiki.postgresql.org/wiki/Performance_Optimization -- https://supabase.com/docs/guides/database/overview -- https://supabase.com/docs/guides/auth/row-level-security diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/_contributing.md b/.zencoder/skills/supabase-postgres-best-practices/references/_contributing.md deleted file mode 100644 index 10de8ecb..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/_contributing.md +++ /dev/null @@ -1,171 +0,0 @@ -# Writing Guidelines for Postgres References - -This document provides guidelines for creating effective Postgres best -practice references that work well with AI agents and LLMs. - -## Key Principles - -### 1. Concrete Transformation Patterns - -Show exact SQL rewrites. Avoid philosophical advice. - -**Good:** "Use `WHERE id = ANY(ARRAY[...])` instead of -`WHERE id IN (SELECT ...)`" **Bad:** "Design good schemas" - -### 2. Error-First Structure - -Always show the problematic pattern first, then the solution. This trains agents -to recognize anti-patterns. - -```markdown -**Incorrect (sequential queries):** [bad example] - -**Correct (batched query):** [good example] -``` - -### 3. Quantified Impact - -Include specific metrics. Helps agents prioritize fixes. - -**Good:** "10x faster queries", "50% smaller index", "Eliminates N+1" -**Bad:** "Faster", "Better", "More efficient" - -### 4. Self-Contained Examples - -Examples should be complete and runnable (or close to it). Include `CREATE TABLE` -if context is needed. - -```sql --- Include table definition when needed for clarity -CREATE TABLE users ( - id bigint PRIMARY KEY, - email text NOT NULL, - deleted_at timestamptz -); - --- Now show the index -CREATE INDEX users_active_email_idx ON users(email) WHERE deleted_at IS NULL; -``` - -### 5. Semantic Naming - -Use meaningful table/column names. Names carry intent for LLMs. 
- -**Good:** `users`, `email`, `created_at`, `is_active` -**Bad:** `table1`, `col1`, `field`, `flag` - ---- - -## Code Example Standards - -### SQL Formatting - -```sql --- Use lowercase keywords, clear formatting -CREATE INDEX CONCURRENTLY users_email_idx - ON users(email) - WHERE deleted_at IS NULL; - --- Not cramped or ALL CAPS -CREATE INDEX CONCURRENTLY USERS_EMAIL_IDX ON USERS(EMAIL) WHERE DELETED_AT IS NULL; -``` - -### Comments - -- Explain _why_, not _what_ -- Highlight performance implications -- Point out common pitfalls - -### Language Tags - -- `sql` - Standard SQL queries -- `plpgsql` - Stored procedures/functions -- `typescript` - Application code (when needed) -- `python` - Application code (when needed) - ---- - -## When to Include Application Code - -**Default: SQL Only** - -Most references should focus on pure SQL patterns. This keeps examples portable. - -**Include Application Code When:** - -- Connection pooling configuration -- Transaction management in application context -- ORM anti-patterns (N+1 in Prisma/TypeORM) -- Prepared statement usage - -**Format for Mixed Examples:** - -````markdown -**Incorrect (N+1 in application):** - -```typescript -for (const user of users) { - const posts = await db.query("SELECT * FROM posts WHERE user_id = $1", [ - user.id, - ]); -} -``` -```` - -**Correct (batch query):** - -```typescript -const posts = await db.query("SELECT * FROM posts WHERE user_id = ANY($1)", [ - userIds, -]); -``` - ---- - -## Impact Level Guidelines - -| Level | Improvement | Use When | -|-------|-------------|----------| -| **CRITICAL** | 10-100x | Missing indexes, connection exhaustion, sequential scans on large tables | -| **HIGH** | 5-20x | Wrong index types, poor partitioning, missing covering indexes | -| **MEDIUM-HIGH** | 2-5x | N+1 queries, inefficient pagination, RLS optimization | -| **MEDIUM** | 1.5-3x | Redundant indexes, query plan instability | -| **LOW-MEDIUM** | 1.2-2x | VACUUM tuning, configuration tweaks | -| **LOW** | Incremental | Advanced patterns, edge cases | - ---- - -## Reference Standards - -**Primary Sources:** - -- Official Postgres documentation -- Supabase documentation -- Postgres wiki -- Established blogs (2ndQuadrant, Crunchy Data) - -**Format:** - -```markdown -Reference: -[Postgres Indexes](https://www.postgresql.org/docs/current/indexes.html) -``` - ---- - -## Review Checklist - -Before submitting a reference: - -- [ ] Title is clear and action-oriented -- [ ] Impact level matches the performance gain -- [ ] impactDescription includes quantification -- [ ] Explanation is concise (1-2 sentences) -- [ ] Has at least 1 **Incorrect** SQL example -- [ ] Has at least 1 **Correct** SQL example -- [ ] SQL uses semantic naming -- [ ] Comments explain _why_, not _what_ -- [ ] Trade-offs mentioned if applicable -- [ ] Reference links included -- [ ] `npm run validate` passes -- [ ] `npm run build` generates correct output diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/_sections.md b/.zencoder/skills/supabase-postgres-best-practices/references/_sections.md deleted file mode 100644 index 8ba57c23..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/_sections.md +++ /dev/null @@ -1,39 +0,0 @@ -# Section Definitions - -This file defines the rule categories for Postgres best practices. Rules are automatically assigned to sections based on their filename prefix. - -Take the examples below as pure demonstrative. 
Replace each section with the actual rule categories for Postgres best practices. - ---- - -## 1. Query Performance (query) -**Impact:** CRITICAL -**Description:** Slow queries, missing indexes, inefficient query plans. The most common source of Postgres performance issues. - -## 2. Connection Management (conn) -**Impact:** CRITICAL -**Description:** Connection pooling, limits, and serverless strategies. Critical for applications with high concurrency or serverless deployments. - -## 3. Security & RLS (security) -**Impact:** CRITICAL -**Description:** Row-Level Security policies, privilege management, and authentication patterns. - -## 4. Schema Design (schema) -**Impact:** HIGH -**Description:** Table design, index strategies, partitioning, and data type selection. Foundation for long-term performance. - -## 5. Concurrency & Locking (lock) -**Impact:** MEDIUM-HIGH -**Description:** Transaction management, isolation levels, deadlock prevention, and lock contention patterns. - -## 6. Data Access Patterns (data) -**Impact:** MEDIUM -**Description:** N+1 query elimination, batch operations, cursor-based pagination, and efficient data fetching. - -## 7. Monitoring & Diagnostics (monitor) -**Impact:** LOW-MEDIUM -**Description:** Using pg_stat_statements, EXPLAIN ANALYZE, metrics collection, and performance diagnostics. - -## 8. Advanced Features (advanced) -**Impact:** LOW -**Description:** Full-text search, JSONB optimization, PostGIS, extensions, and advanced Postgres features. diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/_template.md b/.zencoder/skills/supabase-postgres-best-practices/references/_template.md deleted file mode 100644 index 91ace90e..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/_template.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Clear, Action-Oriented Title (e.g., "Use Partial Indexes for Filtered Queries") -impact: MEDIUM -impactDescription: 5-20x query speedup for filtered queries -tags: indexes, query-optimization, performance ---- - -## [Rule Title] - -[1-2 sentence explanation of the problem and why it matters. Focus on performance impact.] - -**Incorrect (describe the problem):** - -```sql --- Comment explaining what makes this slow/problematic -CREATE INDEX users_email_idx ON users(email); - -SELECT * FROM users WHERE email = 'user@example.com' AND deleted_at IS NULL; --- This scans deleted records unnecessarily -``` - -**Correct (describe the solution):** - -```sql --- Comment explaining why this is better -CREATE INDEX users_active_email_idx ON users(email) WHERE deleted_at IS NULL; - -SELECT * FROM users WHERE email = 'user@example.com' AND deleted_at IS NULL; --- Only indexes active users, 10x smaller index, faster queries -``` - -[Optional: Additional context, edge cases, or trade-offs] - -Reference: [Postgres Docs](https://www.postgresql.org/docs/current/) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md b/.zencoder/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md deleted file mode 100644 index 582cbeaa..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Use tsvector for Full-Text Search -impact: MEDIUM -impactDescription: 100x faster than LIKE, with ranking support -tags: full-text-search, tsvector, gin, search ---- - -## Use tsvector for Full-Text Search - -LIKE with wildcards can't use indexes. 
Full-text search with tsvector is orders of magnitude faster. - -**Incorrect (LIKE pattern matching):** - -```sql --- Cannot use index, scans all rows -select * from articles where content like '%postgresql%'; - --- Case-insensitive makes it worse -select * from articles where lower(content) like '%postgresql%'; -``` - -**Correct (full-text search with tsvector):** - -```sql --- Add tsvector column and index -alter table articles add column search_vector tsvector - generated always as (to_tsvector('english', coalesce(title,'') || ' ' || coalesce(content,''))) stored; - -create index articles_search_idx on articles using gin (search_vector); - --- Fast full-text search -select * from articles -where search_vector @@ to_tsquery('english', 'postgresql & performance'); - --- With ranking -select *, ts_rank(search_vector, query) as rank -from articles, to_tsquery('english', 'postgresql') query -where search_vector @@ query -order by rank desc; -``` - -Search multiple terms: - -```sql --- AND: both terms required -to_tsquery('postgresql & performance') - --- OR: either term -to_tsquery('postgresql | mysql') - --- Prefix matching -to_tsquery('post:*') -``` - -Reference: [Full Text Search](https://supabase.com/docs/guides/database/full-text-search) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md b/.zencoder/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md deleted file mode 100644 index e3d261ea..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Index JSONB Columns for Efficient Querying -impact: MEDIUM -impactDescription: 10-100x faster JSONB queries with proper indexing -tags: jsonb, gin, indexes, json ---- - -## Index JSONB Columns for Efficient Querying - -JSONB queries without indexes scan the entire table. Use GIN indexes for containment queries. 
- -**Incorrect (no index on JSONB):** - -```sql -create table products ( - id bigint primary key, - attributes jsonb -); - --- Full table scan for every query -select * from products where attributes @> '{"color": "red"}'; -select * from products where attributes->>'brand' = 'Nike'; -``` - -**Correct (GIN index for JSONB):** - -```sql --- GIN index for containment operators (@>, ?, ?&, ?|) -create index products_attrs_gin on products using gin (attributes); - --- Now containment queries use the index -select * from products where attributes @> '{"color": "red"}'; - --- For specific key lookups, use expression index -create index products_brand_idx on products ((attributes->>'brand')); -select * from products where attributes->>'brand' = 'Nike'; -``` - -Choose the right operator class: - -```sql --- jsonb_ops (default): supports all operators, larger index -create index idx1 on products using gin (attributes); - --- jsonb_path_ops: only @> operator, but 2-3x smaller index -create index idx2 on products using gin (attributes jsonb_path_ops); -``` - -Reference: [JSONB Indexes](https://www.postgresql.org/docs/current/datatype-json.html#JSON-INDEXING) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md b/.zencoder/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md deleted file mode 100644 index 40b9cc50..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Configure Idle Connection Timeouts -impact: HIGH -impactDescription: Reclaim 30-50% of connection slots from idle clients -tags: connections, timeout, idle, resource-management ---- - -## Configure Idle Connection Timeouts - -Idle connections waste resources. Configure timeouts to automatically reclaim them. - -**Incorrect (connections held indefinitely):** - -```sql --- No timeout configured -show idle_in_transaction_session_timeout; -- 0 (disabled) - --- Connections stay open forever, even when idle -select pid, state, state_change, query -from pg_stat_activity -where state = 'idle in transaction'; --- Shows transactions idle for hours, holding locks -``` - -**Correct (automatic cleanup of idle connections):** - -```sql --- Terminate connections idle in transaction after 30 seconds -alter system set idle_in_transaction_session_timeout = '30s'; - --- Terminate completely idle connections after 10 minutes -alter system set idle_session_timeout = '10min'; - --- Reload configuration -select pg_reload_conf(); -``` - -For pooled connections, configure at the pooler level: - -```ini -# pgbouncer.ini -server_idle_timeout = 60 -client_idle_timeout = 300 -``` - -Reference: [Connection Timeouts](https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/conn-limits.md b/.zencoder/skills/supabase-postgres-best-practices/references/conn-limits.md deleted file mode 100644 index cb3e400c..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/conn-limits.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Set Appropriate Connection Limits -impact: CRITICAL -impactDescription: Prevent database crashes and memory exhaustion -tags: connections, max-connections, limits, stability ---- - -## Set Appropriate Connection Limits - -Too many connections exhaust memory and degrade performance. Set limits based on available resources. 
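Before tuning the limit, it helps to see how close the instance already runs to it. A rough, read-only check that can be run at any time:

```sql
-- Current connections versus the configured ceiling
select count(*) as connections_in_use,
       current_setting('max_connections')::int as configured_limit
from pg_stat_activity;
```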
- -**Incorrect (unlimited or excessive connections):** - -```sql --- Default max_connections = 100, but often increased blindly -show max_connections; -- 500 (way too high for 4GB RAM) - --- Each connection uses 1-3MB RAM --- 500 connections * 2MB = 1GB just for connections! --- Out of memory errors under load -``` - -**Correct (calculate based on resources):** - -```sql --- Formula: max_connections = (RAM in MB / 5MB per connection) - reserved --- For 4GB RAM: (4096 / 5) - 10 = ~800 theoretical max --- But practically, 100-200 is better for query performance - --- Recommended settings for 4GB RAM -alter system set max_connections = 100; - --- Also set work_mem appropriately --- work_mem * max_connections should not exceed 25% of RAM -alter system set work_mem = '8MB'; -- 8MB * 100 = 800MB max -``` - -Monitor connection usage: - -```sql -select count(*), state from pg_stat_activity group by state; -``` - -Reference: [Database Connections](https://supabase.com/docs/guides/platform/performance#connection-management) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/conn-pooling.md b/.zencoder/skills/supabase-postgres-best-practices/references/conn-pooling.md deleted file mode 100644 index e2ebd581..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/conn-pooling.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Use Connection Pooling for All Applications -impact: CRITICAL -impactDescription: Handle 10-100x more concurrent users -tags: connection-pooling, pgbouncer, performance, scalability ---- - -## Use Connection Pooling for All Applications - -Postgres connections are expensive (1-3MB RAM each). Without pooling, applications exhaust connections under load. - -**Incorrect (new connection per request):** - -```sql --- Each request creates a new connection --- Application code: db.connect() per request --- Result: 500 concurrent users = 500 connections = crashed database - --- Check current connections -select count(*) from pg_stat_activity; -- 487 connections! -``` - -**Correct (connection pooling):** - -```sql --- Use a pooler like PgBouncer between app and database --- Application connects to pooler, pooler reuses a small pool to Postgres - --- Configure pool_size based on: (CPU cores * 2) + spindle_count --- Example for 4 cores: pool_size = 10 - --- Result: 500 concurrent users share 10 actual connections -select count(*) from pg_stat_activity; -- 10 connections -``` - -Pool modes: - -- **Transaction mode**: connection returned after each transaction (best for most apps) -- **Session mode**: connection held for entire session (needed for prepared statements, temp tables) - -Reference: [Connection Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pooler) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md b/.zencoder/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md deleted file mode 100644 index 555547d8..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Use Prepared Statements Correctly with Pooling -impact: HIGH -impactDescription: Avoid prepared statement conflicts in pooled environments -tags: prepared-statements, connection-pooling, transaction-mode ---- - -## Use Prepared Statements Correctly with Pooling - -Prepared statements are tied to individual database connections. 
In transaction-mode pooling, connections are shared, causing conflicts. - -**Incorrect (named prepared statements with transaction pooling):** - -```sql --- Named prepared statement -prepare get_user as select * from users where id = $1; - --- In transaction mode pooling, next request may get different connection -execute get_user(123); --- ERROR: prepared statement "get_user" does not exist -``` - -**Correct (use unnamed statements or session mode):** - -```sql --- Option 1: Use unnamed prepared statements (most ORMs do this automatically) --- The query is prepared and executed in a single protocol message - --- Option 2: Deallocate after use in transaction mode -prepare get_user as select * from users where id = $1; -execute get_user(123); -deallocate get_user; - --- Option 3: Use session mode pooling (port 5432 vs 6543) --- Connection is held for entire session, prepared statements persist -``` - -Check your driver settings: - -```sql --- Many drivers use prepared statements by default --- Node.js pg: { prepare: false } to disable --- JDBC: prepareThreshold=0 to disable -``` - -Reference: [Prepared Statements with Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pool-modes) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/data-batch-inserts.md b/.zencoder/skills/supabase-postgres-best-practices/references/data-batch-inserts.md deleted file mode 100644 index 997947cb..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/data-batch-inserts.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Batch INSERT Statements for Bulk Data -impact: MEDIUM -impactDescription: 10-50x faster bulk inserts -tags: batch, insert, bulk, performance, copy ---- - -## Batch INSERT Statements for Bulk Data - -Individual INSERT statements have high overhead. Batch multiple rows in single statements or use COPY. - -**Incorrect (individual inserts):** - -```sql --- Each insert is a separate transaction and round trip -insert into events (user_id, action) values (1, 'click'); -insert into events (user_id, action) values (1, 'view'); -insert into events (user_id, action) values (2, 'click'); --- ... 1000 more individual inserts - --- 1000 inserts = 1000 round trips = slow -``` - -**Correct (batch insert):** - -```sql --- Multiple rows in single statement -insert into events (user_id, action) values - (1, 'click'), - (1, 'view'), - (2, 'click'), - -- ... up to ~1000 rows per batch - (999, 'view'); - --- One round trip for 1000 rows -``` - -For large imports, use COPY: - -```sql --- COPY is fastest for bulk loading -copy events (user_id, action, created_at) -from '/path/to/data.csv' -with (format csv, header true); - --- Or from stdin in application -copy events (user_id, action) from stdin with (format csv); -1,click -1,view -2,click -\. -``` - -Reference: [COPY](https://www.postgresql.org/docs/current/sql-copy.html) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/data-n-plus-one.md b/.zencoder/skills/supabase-postgres-best-practices/references/data-n-plus-one.md deleted file mode 100644 index 2109186f..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/data-n-plus-one.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Eliminate N+1 Queries with Batch Loading -impact: MEDIUM-HIGH -impactDescription: 10-100x fewer database round trips -tags: n-plus-one, batch, performance, queries ---- - -## Eliminate N+1 Queries with Batch Loading - -N+1 queries execute one query per item in a loop. 
Batch them into a single query using arrays or JOINs. - -**Incorrect (N+1 queries):** - -```sql --- First query: get all users -select id from users where active = true; -- Returns 100 IDs - --- Then N queries, one per user -select * from orders where user_id = 1; -select * from orders where user_id = 2; -select * from orders where user_id = 3; --- ... 97 more queries! - --- Total: 101 round trips to database -``` - -**Correct (single batch query):** - -```sql --- Collect IDs and query once with ANY -select * from orders where user_id = any(array[1, 2, 3, ...]); - --- Or use JOIN instead of loop -select u.id, u.name, o.* -from users u -left join orders o on o.user_id = u.id -where u.active = true; - --- Total: 1 round trip -``` - -Application pattern: - -```sql --- Instead of looping in application code: --- for user in users: db.query("SELECT * FROM orders WHERE user_id = $1", user.id) - --- Pass array parameter: -select * from orders where user_id = any($1::bigint[]); --- Application passes: [1, 2, 3, 4, 5, ...] -``` - -Reference: [N+1 Query Problem](https://supabase.com/docs/guides/database/query-optimization) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/data-pagination.md b/.zencoder/skills/supabase-postgres-best-practices/references/data-pagination.md deleted file mode 100644 index 633d8393..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/data-pagination.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Use Cursor-Based Pagination Instead of OFFSET -impact: MEDIUM-HIGH -impactDescription: Consistent O(1) performance regardless of page depth -tags: pagination, cursor, keyset, offset, performance ---- - -## Use Cursor-Based Pagination Instead of OFFSET - -OFFSET-based pagination scans all skipped rows, getting slower on deeper pages. Cursor pagination is O(1). - -**Incorrect (OFFSET pagination):** - -```sql --- Page 1: scans 20 rows -select * from products order by id limit 20 offset 0; - --- Page 100: scans 2000 rows to skip 1980 -select * from products order by id limit 20 offset 1980; - --- Page 10000: scans 200,000 rows! -select * from products order by id limit 20 offset 199980; -``` - -**Correct (cursor/keyset pagination):** - -```sql --- Page 1: get first 20 -select * from products order by id limit 20; --- Application stores last_id = 20 - --- Page 2: start after last ID -select * from products where id > 20 order by id limit 20; --- Uses index, always fast regardless of page depth - --- Page 10000: same speed as page 1 -select * from products where id > 199980 order by id limit 20; -``` - -For multi-column sorting: - -```sql --- Cursor must include all sort columns -select * from products -where (created_at, id) > ('2024-01-15 10:00:00', 12345) -order by created_at, id -limit 20; -``` - -Reference: [Pagination](https://supabase.com/docs/guides/database/pagination) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/data-upsert.md b/.zencoder/skills/supabase-postgres-best-practices/references/data-upsert.md deleted file mode 100644 index bc95e230..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/data-upsert.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Use UPSERT for Insert-or-Update Operations -impact: MEDIUM -impactDescription: Atomic operation, eliminates race conditions -tags: upsert, on-conflict, insert, update ---- - -## Use UPSERT for Insert-or-Update Operations - -Using separate SELECT-then-INSERT/UPDATE creates race conditions. Use INSERT ... 
ON CONFLICT for atomic upserts. - -**Incorrect (check-then-insert race condition):** - -```sql --- Race condition: two requests check simultaneously -select * from settings where user_id = 123 and key = 'theme'; --- Both find nothing - --- Both try to insert -insert into settings (user_id, key, value) values (123, 'theme', 'dark'); --- One succeeds, one fails with duplicate key error! -``` - -**Correct (atomic UPSERT):** - -```sql --- Single atomic operation -insert into settings (user_id, key, value) -values (123, 'theme', 'dark') -on conflict (user_id, key) -do update set value = excluded.value, updated_at = now(); - --- Returns the inserted/updated row -insert into settings (user_id, key, value) -values (123, 'theme', 'dark') -on conflict (user_id, key) -do update set value = excluded.value -returning *; -``` - -Insert-or-ignore pattern: - -```sql --- Insert only if not exists (no update) -insert into page_views (page_id, user_id) -values (1, 123) -on conflict (page_id, user_id) do nothing; -``` - -Reference: [INSERT ON CONFLICT](https://www.postgresql.org/docs/current/sql-insert.html#SQL-ON-CONFLICT) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/lock-advisory.md b/.zencoder/skills/supabase-postgres-best-practices/references/lock-advisory.md deleted file mode 100644 index 572eaf0d..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/lock-advisory.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Use Advisory Locks for Application-Level Locking -impact: MEDIUM -impactDescription: Efficient coordination without row-level lock overhead -tags: advisory-locks, coordination, application-locks ---- - -## Use Advisory Locks for Application-Level Locking - -Advisory locks provide application-level coordination without requiring database rows to lock. - -**Incorrect (creating rows just for locking):** - -```sql --- Creating dummy rows to lock on -create table resource_locks ( - resource_name text primary key -); - -insert into resource_locks values ('report_generator'); - --- Lock by selecting the row -select * from resource_locks where resource_name = 'report_generator' for update; -``` - -**Correct (advisory locks):** - -```sql --- Session-level advisory lock (released on disconnect or unlock) -select pg_advisory_lock(hashtext('report_generator')); --- ... do exclusive work ... -select pg_advisory_unlock(hashtext('report_generator')); - --- Transaction-level lock (released on commit/rollback) -begin; -select pg_advisory_xact_lock(hashtext('daily_report')); --- ... do work ... 
-commit; -- Lock automatically released -``` - -Try-lock for non-blocking operations: - -```sql --- Returns immediately with true/false instead of waiting -select pg_try_advisory_lock(hashtext('resource_name')); - --- Use in application -if (acquired) { - -- Do work - select pg_advisory_unlock(hashtext('resource_name')); -} else { - -- Skip or retry later -} -``` - -Reference: [Advisory Locks](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md b/.zencoder/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md deleted file mode 100644 index 974da5ed..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Prevent Deadlocks with Consistent Lock Ordering -impact: MEDIUM-HIGH -impactDescription: Eliminate deadlock errors, improve reliability -tags: deadlocks, locking, transactions, ordering ---- - -## Prevent Deadlocks with Consistent Lock Ordering - -Deadlocks occur when transactions lock resources in different orders. Always -acquire locks in a consistent order. - -**Incorrect (inconsistent lock ordering):** - -```sql --- Transaction A -- Transaction B -begin; begin; -update accounts update accounts -set balance = balance - 100 set balance = balance - 50 -where id = 1; where id = 2; -- B locks row 2 - -update accounts update accounts -set balance = balance + 100 set balance = balance + 50 -where id = 2; -- A waits for B where id = 1; -- B waits for A - --- DEADLOCK! Both waiting for each other -``` - -**Correct (lock rows in consistent order first):** - -```sql --- Explicitly acquire locks in ID order before updating -begin; -select * from accounts where id in (1, 2) order by id for update; - --- Now perform updates in any order - locks already held -update accounts set balance = balance - 100 where id = 1; -update accounts set balance = balance + 100 where id = 2; -commit; -``` - -Alternative: use a single statement to update atomically: - -```sql --- Single statement acquires all locks atomically -begin; -update accounts -set balance = balance + case id - when 1 then -100 - when 2 then 100 -end -where id in (1, 2); -commit; -``` - -Detect deadlocks in logs: - -```sql --- Check for recent deadlocks -select * from pg_stat_database where deadlocks > 0; - --- Enable deadlock logging -set log_lock_waits = on; -set deadlock_timeout = '1s'; -``` - -Reference: -[Deadlocks](https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-DEADLOCKS) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/lock-short-transactions.md b/.zencoder/skills/supabase-postgres-best-practices/references/lock-short-transactions.md deleted file mode 100644 index e6b8ef26..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/lock-short-transactions.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Keep Transactions Short to Reduce Lock Contention -impact: MEDIUM-HIGH -impactDescription: 3-5x throughput improvement, fewer deadlocks -tags: transactions, locking, contention, performance ---- - -## Keep Transactions Short to Reduce Lock Contention - -Long-running transactions hold locks that block other queries. Keep transactions as short as possible. 
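A quick way to spot offenders before restructuring application code is to list transactions that have been open for a while. A hedged sketch; the 30-second threshold is arbitrary and should be adjusted to the workload:

```sql
-- Transactions open longer than 30 seconds, oldest first
select pid, state, now() - xact_start as tx_age, left(query, 80) as current_query
from pg_stat_activity
where xact_start is not null
  and now() - xact_start > interval '30 seconds'
order by tx_age desc;
```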
- -**Incorrect (long transaction with external calls):** - -```sql -begin; -select * from orders where id = 1 for update; -- Lock acquired - --- Application makes HTTP call to payment API (2-5 seconds) --- Other queries on this row are blocked! - -update orders set status = 'paid' where id = 1; -commit; -- Lock held for entire duration -``` - -**Correct (minimal transaction scope):** - -```sql --- Validate data and call APIs outside transaction --- Application: response = await paymentAPI.charge(...) - --- Only hold lock for the actual update -begin; -update orders -set status = 'paid', payment_id = $1 -where id = $2 and status = 'pending' -returning *; -commit; -- Lock held for milliseconds -``` - -Use `statement_timeout` to prevent runaway transactions: - -```sql --- Abort queries running longer than 30 seconds -set statement_timeout = '30s'; - --- Or per-session -set local statement_timeout = '5s'; -``` - -Reference: [Transaction Management](https://www.postgresql.org/docs/current/tutorial-transactions.html) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/lock-skip-locked.md b/.zencoder/skills/supabase-postgres-best-practices/references/lock-skip-locked.md deleted file mode 100644 index 77bdbb97..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/lock-skip-locked.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Use SKIP LOCKED for Non-Blocking Queue Processing -impact: MEDIUM-HIGH -impactDescription: 10x throughput for worker queues -tags: skip-locked, queue, workers, concurrency ---- - -## Use SKIP LOCKED for Non-Blocking Queue Processing - -When multiple workers process a queue, SKIP LOCKED allows workers to process different rows without waiting. - -**Incorrect (workers block each other):** - -```sql --- Worker 1 and Worker 2 both try to get next job -begin; -select * from jobs where status = 'pending' order by created_at limit 1 for update; --- Worker 2 waits for Worker 1's lock to release! -``` - -**Correct (SKIP LOCKED for parallel processing):** - -```sql --- Each worker skips locked rows and gets the next available -begin; -select * from jobs -where status = 'pending' -order by created_at -limit 1 -for update skip locked; - --- Worker 1 gets job 1, Worker 2 gets job 2 (no waiting) - -update jobs set status = 'processing' where id = $1; -commit; -``` - -Complete queue pattern: - -```sql --- Atomic claim-and-update in one statement -update jobs -set status = 'processing', worker_id = $1, started_at = now() -where id = ( - select id from jobs - where status = 'pending' - order by created_at - limit 1 - for update skip locked -) -returning *; -``` - -Reference: [SELECT FOR UPDATE SKIP LOCKED](https://www.postgresql.org/docs/current/sql-select.html#SQL-FOR-UPDATE-SHARE) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md b/.zencoder/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md deleted file mode 100644 index 542978c3..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Use EXPLAIN ANALYZE to Diagnose Slow Queries -impact: LOW-MEDIUM -impactDescription: Identify exact bottlenecks in query execution -tags: explain, analyze, diagnostics, query-plan ---- - -## Use EXPLAIN ANALYZE to Diagnose Slow Queries - -EXPLAIN ANALYZE executes the query and shows actual timings, revealing the true performance bottlenecks. 
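Note that ANALYZE actually executes the statement, so profiling writes needs care. One common pattern, sketched here with an illustrative UPDATE, is to wrap the statement in a transaction and roll it back:

```sql
-- The plan and timings are captured, but the data change is discarded
begin;
explain (analyze, buffers)
update orders set status = 'archived'
where created_at < now() - interval '1 year';
rollback;
```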
- -**Incorrect (guessing at performance issues):** - -```sql --- Query is slow, but why? -select * from orders where customer_id = 123 and status = 'pending'; --- "It must be missing an index" - but which one? -``` - -**Correct (use EXPLAIN ANALYZE):** - -```sql -explain (analyze, buffers, format text) -select * from orders where customer_id = 123 and status = 'pending'; - --- Output reveals the issue: --- Seq Scan on orders (cost=0.00..25000.00 rows=50 width=100) (actual time=0.015..450.123 rows=50 loops=1) --- Filter: ((customer_id = 123) AND (status = 'pending'::text)) --- Rows Removed by Filter: 999950 --- Buffers: shared hit=5000 read=15000 --- Planning Time: 0.150 ms --- Execution Time: 450.500 ms -``` - -Key things to look for: - -```sql --- Seq Scan on large tables = missing index --- Rows Removed by Filter = poor selectivity or missing index --- Buffers: read >> hit = data not cached, needs more memory --- Nested Loop with high loops = consider different join strategy --- Sort Method: external merge = work_mem too low -``` - -Reference: [EXPLAIN](https://supabase.com/docs/guides/database/inspect) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md b/.zencoder/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md deleted file mode 100644 index d7e82f1a..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Enable pg_stat_statements for Query Analysis -impact: LOW-MEDIUM -impactDescription: Identify top resource-consuming queries -tags: pg-stat-statements, monitoring, statistics, performance ---- - -## Enable pg_stat_statements for Query Analysis - -pg_stat_statements tracks execution statistics for all queries, helping identify slow and frequent queries. - -**Incorrect (no visibility into query patterns):** - -```sql --- Database is slow, but which queries are the problem? 
--- No way to know without pg_stat_statements -``` - -**Correct (enable and query pg_stat_statements):** - -```sql --- Enable the extension -create extension if not exists pg_stat_statements; - --- Find slowest queries by total time -select - calls, - round(total_exec_time::numeric, 2) as total_time_ms, - round(mean_exec_time::numeric, 2) as mean_time_ms, - query -from pg_stat_statements -order by total_exec_time desc -limit 10; - --- Find most frequent queries -select calls, query -from pg_stat_statements -order by calls desc -limit 10; - --- Reset statistics after optimization -select pg_stat_statements_reset(); -``` - -Key metrics to monitor: - -```sql --- Queries with high mean time (candidates for optimization) -select query, mean_exec_time, calls -from pg_stat_statements -where mean_exec_time > 100 -- > 100ms average -order by mean_exec_time desc; -``` - -Reference: [pg_stat_statements](https://supabase.com/docs/guides/database/extensions/pg_stat_statements) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md b/.zencoder/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md deleted file mode 100644 index e0e8ea0b..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Maintain Table Statistics with VACUUM and ANALYZE -impact: MEDIUM -impactDescription: 2-10x better query plans with accurate statistics -tags: vacuum, analyze, statistics, maintenance, autovacuum ---- - -## Maintain Table Statistics with VACUUM and ANALYZE - -Outdated statistics cause the query planner to make poor decisions. VACUUM reclaims space, ANALYZE updates statistics. - -**Incorrect (stale statistics):** - -```sql --- Table has 1M rows but stats say 1000 --- Query planner chooses wrong strategy -explain select * from orders where status = 'pending'; --- Shows: Seq Scan (because stats show small table) --- Actually: Index Scan would be much faster -``` - -**Correct (maintain fresh statistics):** - -```sql --- Manually analyze after large data changes -analyze orders; - --- Analyze specific columns used in WHERE clauses -analyze orders (status, created_at); - --- Check when tables were last analyzed -select - relname, - last_vacuum, - last_autovacuum, - last_analyze, - last_autoanalyze -from pg_stat_user_tables -order by last_analyze nulls first; -``` - -Autovacuum tuning for busy tables: - -```sql --- Increase frequency for high-churn tables -alter table orders set ( - autovacuum_vacuum_scale_factor = 0.05, -- Vacuum at 5% dead tuples (default 20%) - autovacuum_analyze_scale_factor = 0.02 -- Analyze at 2% changes (default 10%) -); - --- Check autovacuum status -select * from pg_stat_progress_vacuum; -``` - -Reference: [VACUUM](https://supabase.com/docs/guides/database/database-size#vacuum-operations) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/query-composite-indexes.md b/.zencoder/skills/supabase-postgres-best-practices/references/query-composite-indexes.md deleted file mode 100644 index fea64523..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/query-composite-indexes.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Create Composite Indexes for Multi-Column Queries -impact: HIGH -impactDescription: 5-10x faster multi-column queries -tags: indexes, composite-index, multi-column, query-optimization ---- - -## Create Composite Indexes for Multi-Column Queries - -When queries filter on multiple 
columns, a composite index is more efficient than separate single-column indexes. - -**Incorrect (separate indexes require bitmap scan):** - -```sql --- Two separate indexes -create index orders_status_idx on orders (status); -create index orders_created_idx on orders (created_at); - --- Query must combine both indexes (slower) -select * from orders where status = 'pending' and created_at > '2024-01-01'; -``` - -**Correct (composite index):** - -```sql --- Single composite index (leftmost column first for equality checks) -create index orders_status_created_idx on orders (status, created_at); - --- Query uses one efficient index scan -select * from orders where status = 'pending' and created_at > '2024-01-01'; -``` - -**Column order matters** - place equality columns first, range columns last: - -```sql --- Good: status (=) before created_at (>) -create index idx on orders (status, created_at); - --- Works for: WHERE status = 'pending' --- Works for: WHERE status = 'pending' AND created_at > '2024-01-01' --- Does NOT work for: WHERE created_at > '2024-01-01' (leftmost prefix rule) -``` - -Reference: [Multicolumn Indexes](https://www.postgresql.org/docs/current/indexes-multicolumn.html) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/query-covering-indexes.md b/.zencoder/skills/supabase-postgres-best-practices/references/query-covering-indexes.md deleted file mode 100644 index 9d2a4947..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/query-covering-indexes.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Use Covering Indexes to Avoid Table Lookups -impact: MEDIUM-HIGH -impactDescription: 2-5x faster queries by eliminating heap fetches -tags: indexes, covering-index, include, index-only-scan ---- - -## Use Covering Indexes to Avoid Table Lookups - -Covering indexes include all columns needed by a query, enabling index-only scans that skip the table entirely. - -**Incorrect (index scan + heap fetch):** - -```sql -create index users_email_idx on users (email); - --- Must fetch name and created_at from table heap -select email, name, created_at from users where email = 'user@example.com'; -``` - -**Correct (index-only scan with INCLUDE):** - -```sql --- Include non-searchable columns in the index -create index users_email_idx on users (email) include (name, created_at); - --- All columns served from index, no table access needed -select email, name, created_at from users where email = 'user@example.com'; -``` - -Use INCLUDE for columns you SELECT but don't filter on: - -```sql --- Searching by status, but also need customer_id and total -create index orders_status_idx on orders (status) include (customer_id, total); - -select status, customer_id, total from orders where status = 'shipped'; -``` - -Reference: [Index-Only Scans](https://www.postgresql.org/docs/current/indexes-index-only-scans.html) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/query-index-types.md b/.zencoder/skills/supabase-postgres-best-practices/references/query-index-types.md deleted file mode 100644 index 93b32590..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/query-index-types.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Choose the Right Index Type for Your Data -impact: HIGH -impactDescription: 10-100x improvement with correct index type -tags: indexes, btree, gin, gist, brin, hash, index-types ---- - -## Choose the Right Index Type for Your Data - -Different index types excel at different query patterns. 
The default B-tree isn't always optimal. - -**Incorrect (B-tree for JSONB containment):** - -```sql --- B-tree cannot optimize containment operators -create index products_attrs_idx on products (attributes); -select * from products where attributes @> '{"color": "red"}'; --- Full table scan - B-tree doesn't support @> operator -``` - -**Correct (GIN for JSONB):** - -```sql --- GIN supports @>, ?, ?&, ?| operators -create index products_attrs_idx on products using gin (attributes); -select * from products where attributes @> '{"color": "red"}'; -``` - -Index type guide: - -```sql --- B-tree (default): =, <, >, BETWEEN, IN, IS NULL -create index users_created_idx on users (created_at); - --- GIN: arrays, JSONB, full-text search -create index posts_tags_idx on posts using gin (tags); - --- GiST: geometric data, range types, nearest-neighbor (KNN) queries -create index locations_idx on places using gist (location); - --- BRIN: large time-series tables (10-100x smaller) -create index events_time_idx on events using brin (created_at); - --- Hash: equality-only (slightly faster than B-tree for =) -create index sessions_token_idx on sessions using hash (token); -``` - -Reference: [Index Types](https://www.postgresql.org/docs/current/indexes-types.html) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/query-missing-indexes.md b/.zencoder/skills/supabase-postgres-best-practices/references/query-missing-indexes.md deleted file mode 100644 index e6daace7..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/query-missing-indexes.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Add Indexes on WHERE and JOIN Columns -impact: CRITICAL -impactDescription: 100-1000x faster queries on large tables -tags: indexes, performance, sequential-scan, query-optimization ---- - -## Add Indexes on WHERE and JOIN Columns - -Queries filtering or joining on unindexed columns cause full table scans, which become exponentially slower as tables grow. 
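A practical way to find likely victims is to look for tables that Postgres keeps reading with sequential scans. A rough heuristic query, not a definitive diagnosis:

```sql
-- Tables read mostly by sequential scans are index candidates
select relname, seq_scan, seq_tup_read, idx_scan, n_live_tup
from pg_stat_user_tables
where seq_scan > 0
order by seq_tup_read desc
limit 10;
```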
- -**Incorrect (sequential scan on large table):** - -```sql --- No index on customer_id causes full table scan -select * from orders where customer_id = 123; - --- EXPLAIN shows: Seq Scan on orders (cost=0.00..25000.00 rows=100 width=85) -``` - -**Correct (index scan):** - -```sql --- Create index on frequently filtered column -create index orders_customer_id_idx on orders (customer_id); - -select * from orders where customer_id = 123; - --- EXPLAIN shows: Index Scan using orders_customer_id_idx (cost=0.42..8.44 rows=100 width=85) -``` - -For JOIN columns, always index the foreign key side: - -```sql --- Index the referencing column -create index orders_customer_id_idx on orders (customer_id); - -select c.name, o.total -from customers c -join orders o on o.customer_id = c.id; -``` - -Reference: [Query Optimization](https://supabase.com/docs/guides/database/query-optimization) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/query-partial-indexes.md b/.zencoder/skills/supabase-postgres-best-practices/references/query-partial-indexes.md deleted file mode 100644 index 3e61a341..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/query-partial-indexes.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Use Partial Indexes for Filtered Queries -impact: HIGH -impactDescription: 5-20x smaller indexes, faster writes and queries -tags: indexes, partial-index, query-optimization, storage ---- - -## Use Partial Indexes for Filtered Queries - -Partial indexes only include rows matching a WHERE condition, making them smaller and faster when queries consistently filter on the same condition. - -**Incorrect (full index includes irrelevant rows):** - -```sql --- Index includes all rows, even soft-deleted ones -create index users_email_idx on users (email); - --- Query always filters active users -select * from users where email = 'user@example.com' and deleted_at is null; -``` - -**Correct (partial index matches query filter):** - -```sql --- Index only includes active users -create index users_active_email_idx on users (email) -where deleted_at is null; - --- Query uses the smaller, faster index -select * from users where email = 'user@example.com' and deleted_at is null; -``` - -Common use cases for partial indexes: - -```sql --- Only pending orders (status rarely changes once completed) -create index orders_pending_idx on orders (created_at) -where status = 'pending'; - --- Only non-null values -create index products_sku_idx on products (sku) -where sku is not null; -``` - -Reference: [Partial Indexes](https://www.postgresql.org/docs/current/indexes-partial.html) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/schema-constraints.md b/.zencoder/skills/supabase-postgres-best-practices/references/schema-constraints.md deleted file mode 100644 index 1d2ef8f9..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/schema-constraints.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Add Constraints Safely in Migrations -impact: HIGH -impactDescription: Prevents migration failures and enables idempotent schema changes -tags: constraints, migrations, schema, alter-table ---- - -## Add Constraints Safely in Migrations - -PostgreSQL does not support `ADD CONSTRAINT IF NOT EXISTS`. Migrations using this syntax will fail. 
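A related caveat when adding constraints to large tables: a new CHECK or FOREIGN KEY constraint normally scans the whole table while holding a strong lock. A sketch of the two-step `NOT VALID` / `VALIDATE` pattern, with an illustrative table and constraint name:

```sql
-- Step 1: add the constraint without checking existing rows (brief lock only)
alter table public.orders
  add constraint orders_total_positive check (total >= 0) not valid;

-- Step 2: validate later; uses a weaker lock and can run during normal traffic
alter table public.orders
  validate constraint orders_total_positive;
```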
- -**Incorrect (causes syntax error):** - -```sql --- ERROR: syntax error at or near "not" (SQLSTATE 42601) -alter table public.profiles -add constraint if not exists profiles_birthchart_id_unique unique (birthchart_id); -``` - -**Correct (idempotent constraint creation):** - -```sql --- Use DO block to check before adding -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'profiles_birthchart_id_unique' - and conrelid = 'public.profiles'::regclass - ) then - alter table public.profiles - add constraint profiles_birthchart_id_unique unique (birthchart_id); - end if; -end $$; -``` - -For all constraint types: - -```sql --- Check constraints -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'check_age_positive' - ) then - alter table users add constraint check_age_positive check (age > 0); - end if; -end $$; - --- Foreign keys -do $$ -begin - if not exists ( - select 1 from pg_constraint - where conname = 'profiles_birthchart_id_fkey' - ) then - alter table profiles - add constraint profiles_birthchart_id_fkey - foreign key (birthchart_id) references birthcharts(id); - end if; -end $$; -``` - -Check if constraint exists: - -```sql --- Query to check constraint existence -select conname, contype, pg_get_constraintdef(oid) -from pg_constraint -where conrelid = 'public.profiles'::regclass; - --- contype values: --- 'p' = PRIMARY KEY --- 'f' = FOREIGN KEY --- 'u' = UNIQUE --- 'c' = CHECK -``` - -Reference: [Constraints](https://www.postgresql.org/docs/current/ddl-constraints.html) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/schema-data-types.md b/.zencoder/skills/supabase-postgres-best-practices/references/schema-data-types.md deleted file mode 100644 index f253a581..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/schema-data-types.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Choose Appropriate Data Types -impact: HIGH -impactDescription: 50% storage reduction, faster comparisons -tags: data-types, schema, storage, performance ---- - -## Choose Appropriate Data Types - -Using the right data types reduces storage, improves query performance, and prevents bugs. 
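To audit an existing schema, the information_schema catalog lists every column's declared type. A rough starting point; the type filter below is illustrative, not a rule:

```sql
-- Columns whose types often deserve a second look (varchar(n), plain int)
select table_name, column_name, data_type, character_maximum_length
from information_schema.columns
where table_schema = 'public'
  and data_type in ('character varying', 'integer')
order by table_name, column_name;
```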
- -**Incorrect (wrong data types):** - -```sql -create table users ( - id int, -- Will overflow at 2.1 billion - email varchar(255), -- Unnecessary length limit - created_at timestamp, -- Missing timezone info - is_active varchar(5), -- String for boolean - price varchar(20) -- String for numeric -); -``` - -**Correct (appropriate data types):** - -```sql -create table users ( - id bigint generated always as identity primary key, -- 9 quintillion max - email text, -- No artificial limit, same performance as varchar - created_at timestamptz, -- Always store timezone-aware timestamps - is_active boolean default true, -- 1 byte vs variable string length - price numeric(10,2) -- Exact decimal arithmetic -); -``` - -Key guidelines: - -```sql --- IDs: use bigint, not int (future-proofing) --- Strings: use text, not varchar(n) unless constraint needed --- Time: use timestamptz, not timestamp --- Money: use numeric, not float (precision matters) --- Enums: use text with check constraint or create enum type -``` - -Reference: [Data Types](https://www.postgresql.org/docs/current/datatype.html) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md b/.zencoder/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md deleted file mode 100644 index 6c3d6ff6..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Index Foreign Key Columns -impact: HIGH -impactDescription: 10-100x faster JOINs and CASCADE operations -tags: foreign-key, indexes, joins, schema ---- - -## Index Foreign Key Columns - -Postgres does not automatically index foreign key columns. Missing indexes cause slow JOINs and CASCADE operations. - -**Incorrect (unindexed foreign key):** - -```sql -create table orders ( - id bigint generated always as identity primary key, - customer_id bigint references customers(id) on delete cascade, - total numeric(10,2) -); - --- No index on customer_id! 
--- JOINs and ON DELETE CASCADE both require full table scan -select * from orders where customer_id = 123; -- Seq Scan -delete from customers where id = 123; -- Locks table, scans all orders -``` - -**Correct (indexed foreign key):** - -```sql -create table orders ( - id bigint generated always as identity primary key, - customer_id bigint references customers(id) on delete cascade, - total numeric(10,2) -); - --- Always index the FK column -create index orders_customer_id_idx on orders (customer_id); - --- Now JOINs and cascades are fast -select * from orders where customer_id = 123; -- Index Scan -delete from customers where id = 123; -- Uses index, fast cascade -``` - -Find missing FK indexes: - -```sql -select - conrelid::regclass as table_name, - a.attname as fk_column -from pg_constraint c -join pg_attribute a on a.attrelid = c.conrelid and a.attnum = any(c.conkey) -where c.contype = 'f' - and not exists ( - select 1 from pg_index i - where i.indrelid = c.conrelid and a.attnum = any(i.indkey) - ); -``` - -Reference: [Foreign Keys](https://www.postgresql.org/docs/current/ddl-constraints.html#DDL-CONSTRAINTS-FK) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md b/.zencoder/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md deleted file mode 100644 index f0072940..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Use Lowercase Identifiers for Compatibility -impact: MEDIUM -impactDescription: Avoid case-sensitivity bugs with tools, ORMs, and AI assistants -tags: naming, identifiers, case-sensitivity, schema, conventions ---- - -## Use Lowercase Identifiers for Compatibility - -PostgreSQL folds unquoted identifiers to lowercase. Quoted mixed-case identifiers require quotes forever and cause issues with tools, ORMs, and AI assistants that may not recognize them. 
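To find identifiers that were created with quotes and mixed case, compare each name with its lowercased form. A minimal audit query:

```sql
-- Tables and columns whose names are not already lowercase
select table_name, column_name
from information_schema.columns
where table_schema = 'public'
  and (table_name <> lower(table_name) or column_name <> lower(column_name))
order by table_name, column_name;
```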
- -**Incorrect (mixed-case identifiers):** - -```sql --- Quoted identifiers preserve case but require quotes everywhere -CREATE TABLE "Users" ( - "userId" bigint PRIMARY KEY, - "firstName" text, - "lastName" text -); - --- Must always quote or queries fail -SELECT "firstName" FROM "Users" WHERE "userId" = 1; - --- This fails - Users becomes users without quotes -SELECT firstName FROM Users; --- ERROR: relation "users" does not exist -``` - -**Correct (lowercase snake_case):** - -```sql --- Unquoted lowercase identifiers are portable and tool-friendly -CREATE TABLE users ( - user_id bigint PRIMARY KEY, - first_name text, - last_name text -); - --- Works without quotes, recognized by all tools -SELECT first_name FROM users WHERE user_id = 1; -``` - -Common sources of mixed-case identifiers: - -```sql --- ORMs often generate quoted camelCase - configure them to use snake_case --- Migrations from other databases may preserve original casing --- Some GUI tools quote identifiers by default - disable this - --- If stuck with mixed-case, create views as a compatibility layer -CREATE VIEW users AS SELECT "userId" AS user_id, "firstName" AS first_name FROM "Users"; -``` - -Reference: [Identifiers and Key Words](https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/schema-partitioning.md b/.zencoder/skills/supabase-postgres-best-practices/references/schema-partitioning.md deleted file mode 100644 index 13137a03..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/schema-partitioning.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Partition Large Tables for Better Performance -impact: MEDIUM-HIGH -impactDescription: 5-20x faster queries and maintenance on large tables -tags: partitioning, large-tables, time-series, performance ---- - -## Partition Large Tables for Better Performance - -Partitioning splits a large table into smaller pieces, improving query performance and maintenance operations. 
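Partitioning mainly pays off on very large tables, so check sizes before committing to it. A quick size report for ordinary tables in the public schema:

```sql
-- Largest tables by total size (heap + indexes + TOAST)
select c.relname,
       pg_size_pretty(pg_total_relation_size(c.oid)) as total_size
from pg_class c
join pg_namespace n on n.oid = c.relnamespace
where c.relkind = 'r' and n.nspname = 'public'
order by pg_total_relation_size(c.oid) desc
limit 10;
```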
- -**Incorrect (single large table):** - -```sql -create table events ( - id bigint generated always as identity, - created_at timestamptz, - data jsonb -); - --- 500M rows, queries scan everything -select * from events where created_at > '2024-01-01'; -- Slow -vacuum events; -- Takes hours, locks table -``` - -**Correct (partitioned by time range):** - -```sql -create table events ( - id bigint generated always as identity, - created_at timestamptz not null, - data jsonb -) partition by range (created_at); - --- Create partitions for each month -create table events_2024_01 partition of events - for values from ('2024-01-01') to ('2024-02-01'); - -create table events_2024_02 partition of events - for values from ('2024-02-01') to ('2024-03-01'); - --- Queries only scan relevant partitions -select * from events where created_at > '2024-01-15'; -- Only scans events_2024_01+ - --- Drop old data instantly -drop table events_2023_01; -- Instant vs DELETE taking hours -``` - -When to partition: - -- Tables > 100M rows -- Time-series data with date-based queries -- Need to efficiently drop old data - -Reference: [Table Partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/schema-primary-keys.md b/.zencoder/skills/supabase-postgres-best-practices/references/schema-primary-keys.md deleted file mode 100644 index fb0fbb16..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/schema-primary-keys.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Select Optimal Primary Key Strategy -impact: HIGH -impactDescription: Better index locality, reduced fragmentation -tags: primary-key, identity, uuid, serial, schema ---- - -## Select Optimal Primary Key Strategy - -Primary key choice affects insert performance, index size, and replication -efficiency. 
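Since the UUIDv7 option depends on an extension, it is worth checking what the instance actually offers before settling on a strategy. A small availability check; the extension names listed are illustrative and availability varies by provider:

```sql
-- Which UUID-related extensions are available or already installed
select name, default_version, installed_version
from pg_available_extensions
where name in ('pg_uuidv7', 'pgcrypto', 'uuid-ossp')
order by name;
```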
- -**Incorrect (problematic PK choices):** - -```sql --- identity is the SQL-standard approach -create table users ( - id serial primary key -- Works, but IDENTITY is recommended -); - --- Random UUIDs (v4) cause index fragmentation -create table orders ( - id uuid default gen_random_uuid() primary key -- UUIDv4 = random = scattered inserts -); -``` - -**Correct (optimal PK strategies):** - -```sql --- Use IDENTITY for sequential IDs (SQL-standard, best for most cases) -create table users ( - id bigint generated always as identity primary key -); - --- For distributed systems needing UUIDs, use UUIDv7 (time-ordered) --- Requires pg_uuidv7 extension: create extension pg_uuidv7; -create table orders ( - id uuid default uuid_generate_v7() primary key -- Time-ordered, no fragmentation -); - --- Alternative: time-prefixed IDs for sortable, distributed IDs (no extension needed) -create table events ( - id text default concat( - to_char(now() at time zone 'utc', 'YYYYMMDDHH24MISSMS'), - gen_random_uuid()::text - ) primary key -); -``` - -Guidelines: - -- Single database: `bigint identity` (sequential, 8 bytes, SQL-standard) -- Distributed/exposed IDs: UUIDv7 (requires pg_uuidv7) or ULID (time-ordered, no - fragmentation) -- `serial` works but `identity` is SQL-standard and preferred for new - applications -- Avoid random UUIDs (v4) as primary keys on large tables (causes index - fragmentation) - -Reference: -[Identity Columns](https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-PARMS-GENERATED-IDENTITY) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/security-privileges.md b/.zencoder/skills/supabase-postgres-best-practices/references/security-privileges.md deleted file mode 100644 index 448ec345..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/security-privileges.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Apply Principle of Least Privilege -impact: MEDIUM -impactDescription: Reduced attack surface, better audit trail -tags: privileges, security, roles, permissions ---- - -## Apply Principle of Least Privilege - -Grant only the minimum permissions required. Never use superuser for application queries. 
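Before tightening grants, it helps to see what each role can currently do. A read-only audit of table-level privileges in the public schema:

```sql
-- Current table-level grants, grouped by role
select grantee, table_name, string_agg(privilege_type, ', ') as privileges
from information_schema.table_privileges
where table_schema = 'public'
group by grantee, table_name
order by grantee, table_name;
```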
- -**Incorrect (overly broad permissions):** - -```sql --- Application uses superuser connection --- Or grants ALL to application role -grant all privileges on all tables in schema public to app_user; -grant all privileges on all sequences in schema public to app_user; - --- Any SQL injection becomes catastrophic --- drop table users; cascades to everything -``` - -**Correct (minimal, specific grants):** - -```sql --- Create role with no default privileges -create role app_readonly nologin; - --- Grant only SELECT on specific tables -grant usage on schema public to app_readonly; -grant select on public.products, public.categories to app_readonly; - --- Create role for writes with limited scope -create role app_writer nologin; -grant usage on schema public to app_writer; -grant select, insert, update on public.orders to app_writer; -grant usage on sequence orders_id_seq to app_writer; --- No DELETE permission - --- Login role inherits from these -create role app_user login password 'xxx'; -grant app_writer to app_user; -``` - -Revoke public defaults: - -```sql --- Revoke default public access -revoke all on schema public from public; -revoke all on all tables in schema public from public; -``` - -Reference: [Roles and Privileges](https://supabase.com/blog/postgres-roles-and-privileges) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/security-rls-basics.md b/.zencoder/skills/supabase-postgres-best-practices/references/security-rls-basics.md deleted file mode 100644 index c61e1a85..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/security-rls-basics.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Enable Row Level Security for Multi-Tenant Data -impact: CRITICAL -impactDescription: Database-enforced tenant isolation, prevent data leaks -tags: rls, row-level-security, multi-tenant, security ---- - -## Enable Row Level Security for Multi-Tenant Data - -Row Level Security (RLS) enforces data access at the database level, ensuring users only see their own data. - -**Incorrect (application-level filtering only):** - -```sql --- Relying only on application to filter -select * from orders where user_id = $current_user_id; - --- Bug or bypass means all data is exposed! 
-select * from orders; -- Returns ALL orders -``` - -**Correct (database-enforced RLS):** - -```sql --- Enable RLS on the table -alter table orders enable row level security; - --- Create policy for users to see only their orders -create policy orders_user_policy on orders - for all - using (user_id = current_setting('app.current_user_id')::bigint); - --- Force RLS even for table owners -alter table orders force row level security; - --- Set user context and query -set app.current_user_id = '123'; -select * from orders; -- Only returns orders for user 123 -``` - -Policy for authenticated role: - -```sql -create policy orders_user_policy on orders - for all - to authenticated - using (user_id = auth.uid()); -``` - -Reference: [Row Level Security](https://supabase.com/docs/guides/database/postgres/row-level-security) diff --git a/.zencoder/skills/supabase-postgres-best-practices/references/security-rls-performance.md b/.zencoder/skills/supabase-postgres-best-practices/references/security-rls-performance.md deleted file mode 100644 index b32d92f7..00000000 --- a/.zencoder/skills/supabase-postgres-best-practices/references/security-rls-performance.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Optimize RLS Policies for Performance -impact: HIGH -impactDescription: 5-10x faster RLS queries with proper patterns -tags: rls, performance, security, optimization ---- - -## Optimize RLS Policies for Performance - -Poorly written RLS policies can cause severe performance issues. Use subqueries and indexes strategically. - -**Incorrect (function called for every row):** - -```sql -create policy orders_policy on orders - using (auth.uid() = user_id); -- auth.uid() called per row! - --- With 1M rows, auth.uid() is called 1M times -``` - -**Correct (wrap functions in SELECT):** - -```sql -create policy orders_policy on orders - using ((select auth.uid()) = user_id); -- Called once, cached - --- 100x+ faster on large tables -``` - -Use security definer functions for complex checks: - -```sql --- Create helper function (runs as definer, bypasses RLS) -create or replace function is_team_member(team_id bigint) -returns boolean -language sql -security definer -set search_path = '' -as $$ - select exists ( - select 1 from public.team_members - where team_id = $1 and user_id = (select auth.uid()) - ); -$$; - --- Use in policy (indexed lookup, not per-row check) -create policy team_orders_policy on orders - using ((select is_team_member(team_id))); -``` - -Always add indexes on columns used in RLS policies: - -```sql -create index orders_user_id_idx on orders (user_id); -``` - -Reference: [RLS Performance](https://supabase.com/docs/guides/database/postgres/row-level-security#rls-performance-recommendations) diff --git a/.zenflow/settings.json b/.zenflow/settings.json deleted file mode 100644 index 121f25a5..00000000 --- a/.zenflow/settings.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "setup_script": "npm install", - "dev_script": "npm run dev", - "verification_script": "npm run pipeline:quality", - "copy_files": [".env.local"] -} diff --git a/1/tradehaxai-assistant b/1/tradehaxai-assistant deleted file mode 160000 index 48587f34..00000000 --- a/1/tradehaxai-assistant +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 48587f34fb045496bb5395b36d61d88c8c1732e8 diff --git a/90_DAY_EXECUTION_PLAN.md b/90_DAY_EXECUTION_PLAN.md deleted file mode 100644 index ee8a9831..00000000 --- a/90_DAY_EXECUTION_PLAN.md +++ /dev/null @@ -1,250 +0,0 @@ -# TradeHax Digital Empire: 90-Day Execution Plan - -## 🎯 North Star Metric - -**Viral 
Coefficient: 0.3 β†’ 1.5** (compound network growth) - ---- - -## πŸ“… Phase 1: Foundation (Weeks 1-4) - "Make It Clear" - -### Week 1: Hero + Clarity βœ… COMPLETE - -- [x] Hero: "Multiply Your Edge" tagline -- [x] Value props: "For traders/creators/builders" -- [x] 60-second onboarding promise -- **Expected impact**: +30% CTA click-through rate - -### Week 2: Gamified Onboarding βœ… COMPLETE - -- [x] 4-step achievement system -- [x] Progress bars + badges -- [x] Reward messaging ($100 credits) -- **Expected impact**: 5x onboarding completion rate - -### Week 3: Leaderboards (HIGH PRIORITY - START NOW) - -**Why**: Drives daily return visits & competition - -```tsx -// /leaderboards route structure -- /leaderboards/trading (Real P&L, profit charts) -- /leaderboards/music (Listens, popularity) -- /leaderboards/services (Completed projects, ratings) -``` - -**Components needed**: - -- `LeaderboardCard` (user rank, stats, position) -- `RealTimeLeaderboard` (WebSocket for live updates) -- `PremiumBadge` (featured rank = $9/mo) - -**Metrics to track**: - -- Daily visits to leaderboard -- % of users checking rank -- Premium badge conversion - -### Week 4: Marketplace Skeleton (HIGH PRIORITY) - -**Why**: Unlocks creator economy - -```tsx -// /agents/marketplace structure -- Browse trading strategies -- Browse music templates -- Browse service blueprints -``` - -**MVP components**: - -- `AgentCard` (name, creator, rating, price) -- `SearchFilter` (category, rating, price range) -- `PreviewModal` (quick description, try demo) - -**Revenue model**: - -- TradeHax: 30% commission -- Creator: 70% revenue share -- Marketing: Free featured spot for 7 days - ---- - -## πŸ“… Phase 2: Network Effects (Weeks 5-8) - "Make It Stick" - -### Week 5: Discord Bot Integration - -**Distribution channel**: Deploy bot to Discord app directory - -``` -Commands available: -@tradehax scan AAPL (stock signal) -@tradehax signal ETH (crypto) -@tradehax generate "upbeat" (music idea) -@tradehax help (guide) -``` - -**Expected impact**: - -- Bot installs: 50 β†’ 10,000 guilds -- Free users from Discord: 1,000/week -- Premium conversions: 5% of new users = 50/week - -### Week 6: Referral Flywheel - -**Mechanism**: "Invite 3 friends, unlock Pro for free" - -``` -User A signs up -β”œβ”€ Gets unique referral link -β”‚ -User B joins via link -β”œβ”€ Gets 1,000 free credits -β”œβ”€ User A gets 1,000 credits (toward subscription) -β”‚ -When User B upgrades to Pro: -β”œβ”€ User A automatically unlocked Pro (3 invites) -└─ TradeHax: +1 upgraded user (net 70% LTV gain) -``` - -**Metrics**: - -- Referral rate: 10% β†’ 25% -- Viral coefficient: K = (1 invite/user) Γ— (25% conversion) = 0.25 β†’ 0.5+ - -### Week 7: Learning Center Framework - -**Model**: Free β†’ Freemium β†’ Premium - -``` -Tier 1: Free (YouTube-style) -β”œβ”€ "Trading 101: Signals Basics" (5 videos) -β”œβ”€ "Music 101: Composition Tips" (5 videos) -└─ "Services 101: Delivery Models" (5 videos) - -Tier 2: Intermediate (Optional) -β”œβ”€ "Advanced Strategies" (locked, email) -└─ "Creator Growth" (locked, email) - -Tier 3: Premium Certificate ($49) -β”œβ”€ "Mastery: Build Your Signal" -β”œβ”€ PDF + code templates -β”œβ”€ Slack community access -└─ % revenue share in marketplace -``` - -**Why**: Drives premium conversions + engagement - -### Week 8: Community Discord - -**Goal**: 500+ active members in community Discord - -``` -Channels: -#leaderboard-updates (daily top traders) -#strategy-showcase (creators share wins) -#general-chat (community) -#Premium tier upgrades = 
admin + role -``` - ---- - -## πŸ“… Phase 3: Monetization Clarity (Weeks 9-12) - "Make It Pay" - -### Week 9: Pricing Page Redesign - -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Free β”‚ Pro β”‚ Builder β”‚ Council β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ $0 β”‚ $9/mo β”‚ $29/mo β”‚ $99/mo β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ 1 scan/day β”‚ Unlimited β”‚ API access β”‚ 1:1 consult β”‚ -β”‚ Paper mode β”‚ Live trading β”‚ Agent build β”‚ Custom model β”‚ -β”‚ 1 gen/day β”‚ Premium sigs β”‚ Revenue % β”‚ White-label β”‚ -β”‚ Free learn β”‚ Learn access β”‚ Learn access β”‚ Learn access β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -**Design**: Radiant gradient, clear unlock paths, social proof - -### Week 10-11: Conversion Optimization - -- A/B test pricing (current vs. "$49 instead of $99") -- Landing page tests (value hierarchy) -- Email sequences (cart abandonment style) -- Premium feature teasers - -### Week 12: Metrics Review + Roadmap Q2 - -**Check these numbers:** - -- MAU growth: baseline β†’ target -- Referral coefficient -- Premium conversion rate -- Marketplace revenue -- Discord bot invites -- NPS score - ---- - -## 🎨 Quick Wins Summary (Easy Wins, Big Impact) - -| Priority | Feature | Effort | Impact | Owner | -|----------|---------|--------|--------|-------| -| 1 | Leaderboards | 3 days | +40% DAU | Frontend | -| 2 | Marketplace MVP | 5 days | +30% engagement | Full-stack | -| 3 | Discord bot commands | 2 days | +1K users/week | Backend | -| 4 | Referral tracking | 2 days | +K coefficient | Backend | -| 5 | Learning center | 5 days | +8% conversion | Content | -| 6 | Pricing page update | 1 day | +5% LTV | Design | - ---- - -## πŸ“Š Success Metrics (Track Weekly) - -``` -Week 1-4: Clear messaging β†’ +30% CTA, 5x onboarding -Week 5-8: Network effects β†’ Viral K = 0.5+, 10K Discord -Week 9-12: Monetization β†’ 8% premium conversion, $50K/m revenue -``` - ---- - -## πŸš€ Why This Wins - -1. **Clarity first**: Users understand *exactly* what they're getting -2. **Progress visible**: Achievements + leaderboards show results -3. **Community**: Server + Discord = network effects -4. **Creator economy**: Marketplace = compounding value as users create -5. **Mobile-ready**: Discord bot = accessible everywhere -6. 
**Revenue transparent**: Clear pricing = no surprises = higher LTV - ---- - -## πŸ“ Resource Checklist - -- [ ] Designer: Leaderboard UI + pricing page -- [ ] Backend: Leaderboard API + marketplace + Discord bot -- [ ] Frontend: Leaderboard components + marketplace UI -- [ ] Content: 15 learning videos + marketing copy -- [ ] Marketing: Discord bot listing page + referral landing page -- [ ] Analytics: Cohort analysis + funnel tracking - ---- - -## 🎯 Month 3 Vision - -**TradeHax becomes the network where:** - -- 50K users check their trading rank daily -- 5K creators earn $1K+/month from marketplace -- 1M Discord messages/month about strategies -- Premium tier is "obvious must-have" at $9/mo -- 1.5+ viral coefficient (compounding growth) - -**That's a digital empire.** πŸš€ - ---- - -*Next: Execute Week 1 βœ… then sync on Week 3 leaderboards* diff --git a/AGENTS.md b/AGENTS.md deleted file mode 100644 index 4b8581ee..00000000 --- a/AGENTS.md +++ /dev/null @@ -1,23 +0,0 @@ - - - -# General Guidelines for working with Nx - -- For navigating/exploring the workspace, invoke the `nx-workspace` skill first - it has patterns for querying projects, targets, and dependencies -- When running tasks (for example build, lint, test, e2e, etc.), always prefer running the task through `nx` (i.e. `nx run`, `nx run-many`, `nx affected`) instead of using the underlying tooling directly -- Prefix nx commands with the workspace's package manager (e.g., `pnpm nx build`, `npm exec nx test`) - avoids using globally installed CLI -- You have access to the Nx MCP server and its tools, use them to help the user -- For Nx plugin best practices, check `node_modules/@nx//PLUGIN.md`. Not all plugins have this file - proceed without it if unavailable. -- NEVER guess CLI flags - always check nx_docs or `--help` first when unsure - -## Scaffolding & Generators - -- For scaffolding tasks (creating apps, libs, project structure, setup), ALWAYS invoke the `nx-generate` skill FIRST before exploring or calling MCP tools - -## When to use nx_docs - -- USE for: advanced config options, unfamiliar flags, migration guides, plugin configuration, edge cases -- DON'T USE for: basic generator syntax (`nx g @nx/react:app`), standard commands, things you already know -- The `nx-generate` skill handles generator discovery internally - don't call nx_docs just to look up generator syntax - - \ No newline at end of file diff --git a/AI_ENVIRONMENT_STANDARDS.md b/AI_ENVIRONMENT_STANDARDS.md deleted file mode 100644 index 9c152d3c..00000000 --- a/AI_ENVIRONMENT_STANDARDS.md +++ /dev/null @@ -1,43 +0,0 @@ -# TradeHax AI Environment Standards - -This document defines a practical, production-grade baseline for the TradeHax LLM/GPT stack. - -## Core Standards - -- **Provider reliability**: configure one primary model (`HF_MODEL_ID`) and at least **2 fallback models** (`HF_FALLBACK_MODELS`). -- **Guardrailed generation**: keep temperature/top-p/max-length bounded and explicit. -- **Domain governance**: route per domain (`stock`, `crypto`, `kalshi`, `general`) and govern canary rollout via gates. -- **Safety in production**: keep open-mode flags off unless explicitly required. -- **Auditable config**: all env settings are validated via an automated doctor script. - -## Commands - -- `npm run ai:env:doctor` - - Runs standards checks and fails only on critical issues. -- `npm run ai:env:doctor:strict` - - Fails on warnings too; ideal for CI/CD gates. 
-- `npm run hf:sync-assets` - - Syncs relevant datasets/model cards to the active Hugging Face account. - -## Recommended Workflow - -1. Copy values from `AI_ENVIRONMENT_TEMPLATE.env` to `.env.local`. -2. Set production values in your deployment environment (Vercel project settings). -3. Run `npm run ai:env:doctor` locally. -4. Run `npm run ai:env:doctor:strict` in CI before deploy. -5. Keep `HF_FALLBACK_MODELS` diversified (different model families where possible). - -## Model Guidance (practical) - -- **Fast + low cost**: `openai/gpt-5-mini`, `microsoft/phi-4-mini-instruct` -- **Balanced quality**: `openai/gpt-5`, `Qwen/Qwen2.5-7B-Instruct` -- **Reasoning heavy**: `openai/o3`, `deepseek/deepseek-r1-0528` - -Use a cost-aware model for baseline and reserve premium models for high-value flows. - -## Security Notes - -- Never commit real tokens/keys. -- Use least-privilege tokens where possible. -- Rotate provider credentials on a schedule. -- Keep environment values in secret managers (not source control). diff --git a/AI_ENVIRONMENT_TEMPLATE.env b/AI_ENVIRONMENT_TEMPLATE.env deleted file mode 100644 index b431edaf..00000000 --- a/AI_ENVIRONMENT_TEMPLATE.env +++ /dev/null @@ -1,212 +0,0 @@ -# TradeHax AI / LLM production environment template -# Copy required values into .env.local for local dev and Vercel env for deployment. - -# ----------------------------- -# Core provider authentication -# ----------------------------- -HF_API_TOKEN=hf_xxx_replace_me -# Alternate Hugging Face key names (any one works) -HUGGINGFACE_API_TOKEN= -HUGGING_FACE_HUB_TOKEN= -HF_MODEL_ID=Qwen/Qwen2.5-7B-Instruct -HF_FALLBACK_MODELS=Qwen/Qwen2.5-7B-Instruct,microsoft/Phi-4-mini-instruct,meta-llama/Llama-3.3-70B-Instruct -HF_IMAGE_MODEL_ID=stabilityai/stable-diffusion-xl-base-1.0 - -# Optional alternate provider readiness -GITHUB_TOKEN=ghp_xxx_replace_me -OPENAI_API_KEY=sk-xxx_replace_me -AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com -AZURE_OPENAI_API_KEY=xxx_replace_me -AZURE_OPENAI_DEPLOYMENT=gpt-5-mini -ANTHROPIC_API_KEY= -GROQ_API_KEY= -TOGETHER_API_KEY= -COHERE_API_KEY= -MISTRAL_API_KEY= -DEEPSEEK_API_KEY= - -# ----------------------------- -# Global generation guardrails -# ----------------------------- -LLM_TEMPERATURE=0.4 -LLM_TOP_P=0.9 -LLM_MAX_LENGTH=800 - -# ----------------------------- -# Preset routing model controls -# ----------------------------- -TRADEHAX_PRESET_NAVIGATOR_MODEL=Qwen/Qwen2.5-7B-Instruct -TRADEHAX_PRESET_OPERATOR_MODEL=Qwen/Qwen2.5-7B-Instruct -TRADEHAX_PRESET_ANALYST_MODEL=Qwen/Qwen2.5-7B-Instruct -TRADEHAX_PRESET_CREATIVE_MODEL=Qwen/Qwen2.5-7B-Instruct -TRADEHAX_PRESET_RESEARCH_MODEL=Qwen/Qwen2.5-7B-Instruct -TRADEHAX_PRESET_FALLBACK_MODEL=microsoft/Phi-4-mini-instruct - -TRADEHAX_PRESET_NAVIGATOR_TEMP=0.35 -TRADEHAX_PRESET_OPERATOR_TEMP=0.5 -TRADEHAX_PRESET_ANALYST_TEMP=0.25 -TRADEHAX_PRESET_CREATIVE_TEMP=0.85 -TRADEHAX_PRESET_RESEARCH_TEMP=0.45 -TRADEHAX_PRESET_FALLBACK_TEMP=0.2 - -TRADEHAX_PRESET_NAVIGATOR_TOPP=0.9 -TRADEHAX_PRESET_OPERATOR_TOPP=0.92 -TRADEHAX_PRESET_ANALYST_TOPP=0.86 -TRADEHAX_PRESET_CREATIVE_TOPP=0.97 -TRADEHAX_PRESET_RESEARCH_TOPP=0.9 -TRADEHAX_PRESET_FALLBACK_TOPP=0.8 - -# ----------------------------- -# Domain routing & canary policy -# ----------------------------- -TRADEHAX_MODEL_STOCK=Qwen/Qwen2.5-7B-Instruct -TRADEHAX_MODEL_CRYPTO=Qwen/Qwen2.5-7B-Instruct -TRADEHAX_MODEL_KALSHI=Qwen/Qwen2.5-7B-Instruct -TRADEHAX_MODEL_GENERAL=Qwen/Qwen2.5-7B-Instruct - -# Optional canary models -TRADEHAX_MODEL_STOCK_CANARY= 
-TRADEHAX_MODEL_CRYPTO_CANARY= -TRADEHAX_MODEL_KALSHI_CANARY= -TRADEHAX_MODEL_GENERAL_CANARY= - -TRADEHAX_CANARY_MIN_REQUESTS=40 -TRADEHAX_CANARY_MIN_CONFIDENCE_GAIN=3 -TRADEHAX_CANARY_MAX_FALLBACK_RATE=18 -TRADEHAX_CANARY_MAX_FALLBACK_DELTA=6 -TRADEHAX_CANARY_HYSTERESIS=1.5 -TRADEHAX_CANARY_COOLDOWN_MINUTES=30 -TRADEHAX_CANARY_WINDOW_SIZE=240 -TRADEHAX_CANARY_ROLLOUT_PERCENT=15 -TRADEHAX_CANARY_PROMOTION_STREAK=3 -TRADEHAX_CANARY_ROLLBACK_STREAK=2 - -# Adaptive smartness + persistent routing memory -TRADEHAX_BENCHMARK_GOVERNANCE_ENABLED=true -TRADEHAX_SMARTNESS_TRAFFIC_WINDOW_MIN=5 -TRADEHAX_SMARTNESS_TRAFFIC_ELEVATED_RPM=60 -TRADEHAX_SMARTNESS_TRAFFIC_HIGH_RPM=120 -TRADEHAX_AI_ROUTING_MEMORY_STORAGE=memory -TRADEHAX_SUPABASE_AI_ROUTING_MEMORY_TABLE=ai_routing_memory_events -TRADEHAX_INDIVIDUALIZED_AI_ENABLED=true -TRADEHAX_PERSONALIZED_TRAJECTORY_ENABLED=true -TRADEHAX_PERSONALIZED_TRAJECTORY_STORAGE=memory -TRADEHAX_SUPABASE_AI_TRAJECTORY_TABLE=ai_personalized_trajectory_events -TRADEHAX_TRAJECTORY_HORIZON_HOURS=72 -TRADEHAX_TRAJECTORY_MIN_SAMPLES=14 -TRADEHAX_COMPLEX_PROBLEM_ENGINE_ENABLED=true -TRADEHAX_ACCURACY_GOVERNOR_ENABLED=true -TRADEHAX_RESPONSE_VERIFIER_ENABLED=true -TRADEHAX_RETRAIN_EXPORT_QUEUE_ENABLED=true -TRADEHAX_RETRAIN_EXPORT_QUEUE_STORAGE=memory -TRADEHAX_SUPABASE_AI_RETRAIN_QUEUE_TABLE=ai_retrain_export_queue - -# ----------------------------- -# Safety controls -# ----------------------------- -TRADEHAX_LLM_OPEN_MODE=false -TRADEHAX_IMAGE_OPEN_MODE=false - -# ----------------------------- -# Free-tier AI allowance policy -# ----------------------------- -TRADEHAX_FREE_AI_MINUTES_WEEKLY=30 -TRADEHAX_AI_EST_SECONDS_PER_REQUEST=20 -TRADEHAX_AI_BURST_WINDOW_SECONDS=300 -TRADEHAX_AI_BURST_MAX_REQUESTS=30 -TRADEHAX_USAGE_IDEMPOTENCY_TTL_SECONDS=180 - -# ----------------------------- -# Operational metadata -# ----------------------------- -NODE_ENV=development -NEXTAUTH_URL=http://localhost:3000 -VERCEL_AUTOMATION_BYPASS_SECRET=otWDNt0dMxhdDDdkNEPwodop676ofPP1 - -# ----------------------------- -# Production rollout & feature flags -# ----------------------------- -# Phase 3 intelligence rollout -HUB_PHASE3_ENABLED=false -HUB_PHASE3_NEXT_ACTIONS_ENABLED=false -HUB_PHASE3_ADAPTIVE_ONBOARDING_ENABLED=false -HUB_PHASE3_GROUNDED_SOURCES_ENABLED=false -HUB_PHASE3_AUTOMATION_GUARDRAILS_ENABLED=false - -# ODIN / superuser lane rollout -HUB_ODIN_ENABLED=false -HUB_ODIN_MISSION_CONTROL_ENABLED=false -HUB_ODIN_RUNBOOKS_ENABLED=false -HUB_ODIN_REASONING_TRACE_ENABLED=false -HUB_ODIN_DETERMINISTIC_MODE_ENABLED=false -HUB_ODIN_MODEL_ARBITRATION_ENABLED=false -HUB_ODIN_TOOL_SANDBOX_ENABLED=false -HUB_ODIN_COMMAND_PALETTE_ENABLED=false - -# Comma-separated allowlist emails/user IDs with superuser capability. -HUB_ODIN_SUPERUSER_ALLOWLIST= - -# ----------------------------- -# Runtime telemetry & observability -# ----------------------------- -# Optional but recommended for production incident response. -SENTRY_DSN= -SENTRY_AUTH_TOKEN= -SENTRY_ORG= -SENTRY_PROJECT= - -# Analytics / product telemetry endpoint keys -POSTHOG_KEY= -POSTHOG_HOST= - -# ----------------------------- -# Governance / runtime controls -# ----------------------------- -# Fail closed on critical provider outages in production. -TRADEHAX_FAIL_CLOSED_ON_PROVIDER_OUTAGE=false - -# Maximum fallback hops in model arbitration chain. 
-TRADEHAX_MODEL_FALLBACK_MAX_HOPS=2 - -# Percentage rollout for ODIN lane (0-100) -HUB_ODIN_ROLLOUT_PERCENT=0 - -# ----------------------------- -# Secure owner/admin authentication -# ----------------------------- -NEXTAUTH_SECRET=replace_with_32_plus_char_random_secret -TRADEHAX_LOGIN_USERNAME=tradehax-admin -AI_SERVER_API_KEY=replace_with_strong_ai_server_key -# Prefer hashed password format: scrypt$N$r$p$base64_salt$base64_hash -TRADEHAX_LOGIN_PASSWORD_HASH= -# Optional fallback (not recommended for production) -TRADEHAX_LOGIN_PASSWORD= - -# ----------------------------- -# OAuth providers (optional) -# ----------------------------- -GOOGLE_CLIENT_ID= -GOOGLE_CLIENT_SECRET= -FACEBOOK_CLIENT_ID= -FACEBOOK_CLIENT_SECRET= - -# X/Twitter OAuth (any pair name is accepted) -X_CLIENT_ID= -X_CLIENT_SECRET= -TWITTER_CLIENT_ID= -TWITTER_CLIENT_SECRET= -TWITTER_ID= -TWITTER_SECRET= - -# ----------------------------- -# User data encryption (required in production) -# ----------------------------- -# Strong secret used to encrypt account-scoped user profile/context at rest. -# Recommended: 32-byte random key, base64 encoded. -TRADEHAX_USER_DATA_ENCRYPTION_KEY= - -# ----------------------------- -# Personal assistant webhook ingestion -# ----------------------------- -# HMAC secret for /api/intelligence/webhooks/personal -TRADEHAX_PERSONAL_WEBHOOK_SECRET=replace_with_long_random_secret diff --git a/AI_LIVE_ENV_BLUEPRINT.env b/AI_LIVE_ENV_BLUEPRINT.env deleted file mode 100644 index 2b6d81a8..00000000 --- a/AI_LIVE_ENV_BLUEPRINT.env +++ /dev/null @@ -1,174 +0,0 @@ -# ============================================================ -# TradeHax AI + Live Intelligence Production Blueprint -# Fill this file and map values into Vercel Environment Variables -# ============================================================ - -# ---------------------------- -# REQUIRED: Deployment Binding -# ---------------------------- -VERCEL_TOKEN= -VERCEL_PROJECT_ID=prj_lnkhGxnBl7Yx3YWMNVxE1sWOXUUf -VERCEL_AUTOMATION_BYPASS_SECRET=otWDNt0dMxhdDDdkNEPwodop676ofPP1 - -# ---------------------------- -# REQUIRED: Namecheap CI/CD (GitHub Actions secrets) -# Source of truth: -# - .github/workflows/namecheap-vps-deploy.yml -# - scripts/check-namecheap-deploy-config.js -# Configure in: GitHub -> Repo -> Settings -> Secrets and variables -> Actions -# ---------------------------- -NAMECHEAP_VPS_HOST= -NAMECHEAP_VPS_USER= -NAMECHEAP_VPS_SSH_KEY= - -# OPTIONAL but strongly recommended (GitHub Actions secrets) -NAMECHEAP_VPS_PORT=22 -NAMECHEAP_APP_ROOT=/var/www/tradehax -NAMECHEAP_APP_PORT=3000 - -# OPTIONAL cross-env deploy metadata -NEXT_PUBLIC_SITE_URL= - -# ---------------------------- -# REQUIRED: Core AI Inference -# ---------------------------- -HF_API_TOKEN= -HF_API_TOKEN_REICH= -HF_MODEL_ID=Qwen/Qwen2.5-7B-Instruct - -# Optional AI tuning -LLM_TEMPERATURE=0.85 -LLM_MAX_LENGTH=768 -LLM_TOP_P=0.95 -HF_FALLBACK_MODELS=Qwen/Qwen2.5-7B-Instruct,meta-llama/Meta-Llama-3-8B-Instruct,HuggingFaceH4/zephyr-7b-beta - -# Image generation -HF_IMAGE_MODEL_ID=stabilityai/stable-diffusion-2-1 -HF_IMAGE_STEPS=30 -HF_IMAGE_GUIDANCE_SCALE=6.5 -HF_IMAGE_NEGATIVE_PROMPT_DEFAULT=blurry, low quality, watermark, logo, text overlay, disfigured - -# ---------------------------- -# REQUIRED: Security + Control -# ---------------------------- -TRADEHAX_ADMIN_KEY= -AI_SERVER_API_KEY= -TRADEHAX_SUPERUSER_CODE= -TRADEHAX_CRON_SECRET= -NEXTAUTH_SECRET= -JWT_SECRET= - -# Private portal admin credentials -TRADEHAX_LOGIN_USERNAME=admin -# Generate via: npm run 
auth:hash-password -TRADEHAX_LOGIN_PASSWORD_HASH= - -# ---------------------------- -# REQUIRED: Persistence -# ---------------------------- -SUPABASE_URL= -SUPABASE_SERVICE_ROLE_KEY= - -# ---------------------------- -# RECOMMENDED: Provider Routing -# ---------------------------- -# vendor | mock -INTELLIGENCE_DATA_PROVIDER=vendor -INTELLIGENCE_VENDOR_NAME=unusualwhales - -# Configure at least one premium vendor for true live-quality signals -UNUSUALWHALES_API_KEY= -POLYGON_API_KEY= -FINNHUB_API_KEY= -BLOOMBERG_API_KEY= -BPIPE_TOKEN= - -# Provider base/endpoint tuning (optional) -UNUSUALWHALES_BASE_URL=https://api.unusualwhales.com -UNUSUALWHALES_FLOW_ENDPOINT= -UNUSUALWHALES_DARK_POOL_ENDPOINT= -UNUSUALWHALES_NEWS_ENDPOINT= -POLYGON_BASE_URL=https://api.polygon.io -BLOOMBERG_PROXY_BASE_URL= -BLOOMBERG_FLOW_ENDPOINT= -BLOOMBERG_DARK_POOL_ENDPOINT= -BLOOMBERG_NEWS_ENDPOINT= -TRADEHAX_INTELLIGENCE_CACHE_MS=15000 - -# ---------------------------- -# RECOMMENDED: Live WS Overlay -# ---------------------------- -TRADEHAX_INTELLIGENCE_WS_ENABLED=true -TRADEHAX_INTELLIGENCE_WS_URL= -TRADEHAX_INTELLIGENCE_WS_PROTOCOL= -TRADEHAX_INTELLIGENCE_WS_RECONNECT_MS=4000 - -# ---------------------------- -# RECOMMENDED: Retrieval + Vector -# ---------------------------- -TRADEHAX_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 -UPSTASH_VECTOR_REST_URL= -UPSTASH_VECTOR_REST_TOKEN= -TRADEHAX_HF_DATASET_INTEL_ENABLED=true -TRADEHAX_HF_DATASET_INTEL_LIMIT=4 - -# ---------------------------- -# RECOMMENDED: Messaging + Dispatch -# ---------------------------- -TRADEHAX_DISCORD_WEBHOOK= -TRADEHAX_DISCORD_SIGNAL_WEBHOOK= -TRADEHAX_DISCORD_DEFAULT_THREAD_ID= -DISCORD_PUBLIC_KEY= -DISCORD_APPLICATION_ID= -TELEGRAM_BOT_TOKEN= -TELEGRAM_CHAT_ID= - -# ---------------------------- -# OPTIONAL: Deploy Hooks -# ---------------------------- -TRADEHAX_VERCEL_DEPLOY_HOOK_URL= -TRADEHAX_VERCEL_DEPLOY_HOOK_URL_PRODUCTION= -TRADEHAX_VERCEL_DEPLOY_HOOK_URL_PREVIEW= - -# ---------------------------- -# OPTIONAL: Open-mode Guardrails -# ---------------------------- -TRADEHAX_LLM_OPEN_MODE=false -TRADEHAX_IMAGE_OPEN_MODE=true - -# ---------------------------- -# OPTIONAL: Canary Governance -# ---------------------------- -TRADEHAX_CANARY_ROLLOUT_PERCENT=15 -TRADEHAX_CANARY_MIN_REQUESTS=40 -TRADEHAX_CANARY_MIN_CONFIDENCE_GAIN=3 -TRADEHAX_CANARY_MAX_FALLBACK_RATE=18 -TRADEHAX_CANARY_MAX_FALLBACK_DELTA=5 -TRADEHAX_CANARY_COOLDOWN_MINUTES=30 -TRADEHAX_CANARY_WINDOW_SIZE=240 - -# ---------------------------- -# RECOMMENDED: Advanced AI governance switches -# ---------------------------- -TRADEHAX_INDIVIDUALIZED_AI_ENABLED=true -TRADEHAX_PERSONALIZED_TRAJECTORY_ENABLED=true -TRADEHAX_PERSONALIZED_TRAJECTORY_STORAGE=memory -TRADEHAX_SUPABASE_AI_TRAJECTORY_TABLE=ai_personalized_trajectory_events -TRADEHAX_TRAJECTORY_HORIZON_HOURS=72 -TRADEHAX_TRAJECTORY_MIN_SAMPLES=14 -TRADEHAX_COMPLEX_PROBLEM_ENGINE_ENABLED=true -TRADEHAX_ACCURACY_GOVERNOR_ENABLED=true -TRADEHAX_RESPONSE_VERIFIER_ENABLED=true -TRADEHAX_RETRAIN_EXPORT_QUEUE_ENABLED=true -TRADEHAX_RETRAIN_EXPORT_QUEUE_STORAGE=memory -TRADEHAX_SUPABASE_AI_RETRAIN_QUEUE_TABLE=ai_retrain_export_queue - -# ---------------------------- -# ONE-PASS DEPLOY UNBLOCK CHECKLIST -# ---------------------------- -# 1) Set REQUIRED Namecheap GitHub secrets above. -# 2) Keep Vercel runtime vars in Vercel Project Settings -> Environment Variables. 
-# 3) Run local strict gate: npm run ide:sync:deploy-ready -# 4) Trigger deploy: npm run deploy:launch -# 5) If skipped, verify only these are missing: -# NAMECHEAP_VPS_HOST, NAMECHEAP_VPS_USER, NAMECHEAP_VPS_SSH_KEY diff --git a/AI_NAVIGATOR_IMPLEMENTATION_PLAN.md b/AI_NAVIGATOR_IMPLEMENTATION_PLAN.md deleted file mode 100644 index 04d5cb7c..00000000 --- a/AI_NAVIGATOR_IMPLEMENTATION_PLAN.md +++ /dev/null @@ -1,53 +0,0 @@ -# Site AI Navigator + Behavior Intelligence Plan - -## What is now implemented (Phase 1) - -- Global **Site Navigator widget** available across the site (`Need Help?` launcher). -- New API route: `POST /api/ai/navigator` - - Interprets intent (onboarding, trading, billing, game, AI tools, etc.) - - Returns route suggestions with confidence - - Uses Hugging Face model when available, deterministic fallback when not - - Logs behavior + metadata (route, intent, suggestion count) -- New API route: `POST /api/ai/behavior/track` - - Tracks user behavior and metadata events from frontend/backend -- New admin dataset route: `GET /api/ai/admin/site-dataset` - - `?format=json` for structured rows - - `?format=jsonl` for fine-tuning export -- New interpreter + site knowledge modules: - - `lib/ai/site-intent.ts` - - `lib/ai/site-map.ts` - - `lib/ai/site-dataset.ts` - -## Proper place to store LLM + user input data - -### Recommended canonical store (production) - -- **Postgres-backed managed database** as system of record -- Schema starter: `db/supabase/ai_behavior_foundation.sql` -- Store only **pseudonymous user keys** (hashed) -- Keep sensitive values redacted before persistence - -### Why this is the right first storage layer - -- Query-friendly for analytics and model training slices -- Durable across serverless runtime instances (unlike in-memory runtime store) -- Supports RLS and service-role ingestion policy - -## New user directions (UX behavior) - -The widget is your onboarding chatbot: - -1. User clicks **Need Help?** -2. User asks a goal-oriented question (e.g., β€œWhere do I start?”) -3. Navigator returns concise next steps and clickable route chips -4. Click behavior is logged for future model tuning - -This makes navigation self-service while collecting structured β€œintent -> route” data. - -## Next upgrades (Phase 2) - -1. Wire `ingestBehavior` to a Postgres write-through adapter -2. Add daily ETL to export training sets by intent cluster -3. Add semantic retrieval (RAG) over internal docs and route descriptions -4. Add admin dashboard page for behavior heatmaps + prompt funnels -5. Add consent center toggle UI for analytics/training preferences diff --git a/AI_SETUP_SUMMARY.md b/AI_SETUP_SUMMARY.md deleted file mode 100644 index f03801f4..00000000 --- a/AI_SETUP_SUMMARY.md +++ /dev/null @@ -1,56 +0,0 @@ -# Status: βœ… Training Dataset Ready - -## Setup Complete - -Your Hugging Face LLM integration is fully configured: - -### βœ… What's Running -- **LLM API Endpoints**: `/api/ai/generate`, `/api/ai/chat`, `/api/ai/summarize`, `/api/ai/stream` -- **Demo Page**: `/ai` with chat and generator components -- **Training Dataset**: `ai-training-set.jsonl` (26 Q&A pairs) -- **API Token**: Configured in `.env.local` - -### πŸ“Š Training Dataset -- **File**: `ai-training-set.jsonl` -- **Size**: 5.7 KB -- **Entries**: 26 instruction-response pairs -- **Topics**: TradeHax features, gameplay, security, tokens, NFTs - -### πŸš€ Next Steps - -**1. 
Upload Dataset to Hugging Face** -- Visit: https://huggingface.co/new-dataset -- Create dataset: `tradehax-behavioral` -- Upload file: `ai-training-set.jsonl` -- Or use: `node scripts/upload-training-data.js` (requires `HF_API_TOKEN` env var) - -**2. Test LLM Integration** -```bash -npm run dev -# Visit http://localhost:3000/ai -``` - -**3. Fine-tune Your Model** -Once dataset is uploaded, you can: -- Fine-tune Mistral-7B with your data -- Use for RAG (Retrieval-Augmented Generation) -- Expand with more training examples - -### πŸ” Security Note -Your HF token is safely stored in `.env.local` (gitignored). -Never commit tokens to git. - -### πŸ“ Files Added -- `lib/ai/hf-client.ts` - Hugging Face client -- `lib/ai/hf-server.ts` - Server config -- `app/ai/page.tsx` - Demo hub page -- `app/api/ai/*` - 4 API endpoints -- `components/ai/*` - Chat & Generator components -- `ai-training-set.jsonl` - Training data -- `scripts/upload-training-data.js` - Upload script -- `HF_SETUP_GUIDE.md` - Complete setup docs -- `HF_DATASET_UPLOAD.md` - Dataset upload instructions - ---- - -**Your AI system is ready!** πŸŽ‰ diff --git a/API_DOCUMENTATION.md b/API_DOCUMENTATION.md deleted file mode 100644 index 8f2fba52..00000000 --- a/API_DOCUMENTATION.md +++ /dev/null @@ -1,510 +0,0 @@ -# API Documentation - -## Overview - -This document describes the available API endpoints in the TradeHax AI platform. All API routes are built with Next.js App Router and are located in the `app/api/` directory. - -## Base URL - -- **Development**: `http://localhost:3000/api` -- **Production**: `https://tradehaxai.tech/api` - -## Authentication - -Currently, API endpoints do not require authentication. Authentication will be added in future updates for protected endpoints. - -## API Endpoints - -### 1. Claim API - -#### GET /api/claim - -Check the status of the claim endpoint. - -**Request:** -```bash -curl -X GET https://tradehaxai.tech/api/claim -``` - -**Response:** -```json -{ - "status": "ok", - "message": "Claim endpoint is live." -} -``` - -**Status Codes:** -- `200 OK`: Endpoint is operational - ---- - -#### POST /api/claim - -Submit a claim for rewards. - -**Request:** -```bash -curl -X POST https://tradehaxai.tech/api/claim \ - -H "Content-Type: application/json" \ - -d '{ - "walletAddress": "Your_Solana_Wallet_Address", - "amount": 100, - "claimType": "daily_reward" - }' -``` - -**Request Body:** -```json -{ - "walletAddress": "string (required)", - "amount": "number (optional)", - "claimType": "string (optional)" -} -``` - -**Response:** -```json -{ - "status": "ok", - "received": { - "walletAddress": "Your_Solana_Wallet_Address", - "amount": 100, - "claimType": "daily_reward" - } -} -``` - -**Status Codes:** -- `200 OK`: Claim processed successfully -- `400 Bad Request`: Invalid request data -- `500 Internal Server Error`: Server error - -**Notes:** -- This endpoint is currently a placeholder and returns the received data -- Full claim processing logic will be implemented based on business requirements -- May require wallet signature verification in the future - ---- - -### 2. Subscribe API - -#### POST /api/subscribe - -Subscribe to the newsletter. 
- -**Request:** -```bash -curl -X POST https://tradehaxai.tech/api/subscribe \ - -H "Content-Type: application/json" \ - -d '{"email": "user@example.com"}' -``` - -**Request Body:** -```json -{ - "email": "string (required, valid email format)" -} -``` - -**Success Response:** -```json -{ - "success": true -} -``` - -**Error Response:** -```json -{ - "success": false, - "error": "Invalid email address" -} -``` - -**Status Codes:** -- `200 OK`: Subscription successful -- `400 Bad Request`: Invalid email format -- `500 Internal Server Error`: Server error - -**Email Validation:** -- Must contain `@` symbol -- Must be a valid email format - -**Notes:** -- Currently logs email to console -- Integration with email service provider (Mailchimp, SendGrid, etc.) is needed -- No duplicate email checking implemented yet - ---- - -### 3. Unified LLM API - -#### GET /api/llm - -Returns route capabilities for the unified LLM automation endpoint. - -**Response:** -```json -{ - "ok": true, - "endpoint": "/api/llm", - "tasks": ["generate", "summarize", "qa", "chat"], - "defaultModel": "mistralai/Mistral-7B-Instruct-v0.1" -} -``` - ---- - -#### POST /api/llm - -Single endpoint for common text tasks. This wraps the Hugging Face server client with built-in request validation, rate limiting, and monetization usage tracking. - -**Request Body:** -```json -{ - "task": "generate | summarize | qa | chat", - "prompt": "required for generate", - "text": "required for summarize", - "context": "required for qa, optional for chat", - "question": "required for qa", - "messages": [{ "role": "user", "content": "hello" }], - "model": "optional model id", - "temperature": 0.7, - "maxTokens": 512, - "topP": 0.95, - "userId": "optional user id" -} -``` - -**Success Response:** -```json -{ - "ok": true, - "task": "generate", - "result": "model output", - "model": "mistralai/Mistral-7B-Instruct-v0.1", - "settings": { - "temperature": 0.7, - "maxTokens": 512, - "topP": 0.95 - }, - "usage": { - "feature": "ai_chat", - "remainingToday": 149 - } -} -``` - -**Error Cases:** -- `400` for invalid/missing task inputs -- `415` for non-JSON payloads -- `429` for per-minute or daily feature limit excess -- `500` for upstream generation failures - ---- - -## Error Handling - -All API endpoints follow a consistent error response format: - -```json -{ - "success": false, - "error": "Error message description", - "code": "ERROR_CODE" // Optional -} -``` - -### Common Error Codes - -| Code | Description | -|------|-------------| -| `INVALID_EMAIL` | Email address is not valid | -| `MISSING_REQUIRED_FIELD` | Required field is missing from request | -| `INTERNAL_ERROR` | Internal server error occurred | -| `INVALID_WALLET` | Wallet address is invalid | -| `RATE_LIMIT_EXCEEDED` | Too many requests | - ---- - -## Rate Limiting - -**Current Status**: Not implemented - -**Planned Implementation**: -- 100 requests per minute per IP -- 1000 requests per hour per IP -- Rate limit headers will be included in responses - -Example rate limit headers: -``` -X-RateLimit-Limit: 100 -X-RateLimit-Remaining: 95 -X-RateLimit-Reset: 1609459200 -``` - ---- - -## CORS Configuration - -CORS is configured in `vercel.json` to allow requests from: -- `https://tradehaxai.tech` -- `https://www.tradehaxai.tech` - -Development mode allows `localhost` origins. 
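-
-A minimal sketch of what the corresponding `vercel.json` headers block could look like for this origin allowlist (the route pattern and exact header values here are illustrative assumptions, not copied from the project's actual config):
-
-```json
-{
-  "headers": [
-    {
-      "source": "/api/(.*)",
-      "headers": [
-        { "key": "Access-Control-Allow-Origin", "value": "https://tradehaxai.tech" },
-        { "key": "Access-Control-Allow-Methods", "value": "GET, POST, OPTIONS" },
-        { "key": "Access-Control-Allow-Headers", "value": "Content-Type" }
-      ]
-    }
-  ]
-}
-```
-
-Treat this as a shape reference only; the `vercel.json` committed in the repository remains the source of truth for the deployed CORS policy.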
- ---- - -## Request Headers - -### Required Headers - -``` -Content-Type: application/json -``` - -### Optional Headers - -``` -Accept: application/json -User-Agent: Your-App-Name/1.0 -``` - ---- - -## Security - -### Implemented Security Measures - -1. **Input Validation**: All inputs are validated before processing -2. **Error Sanitization**: Error messages don't expose sensitive information -3. **HTTPS Only**: Production endpoints require HTTPS -4. **CSP Headers**: Content Security Policy headers are enforced -5. **XSS Protection**: Cross-site scripting protection enabled - -### Recommended Client-Side Practices - -1. Always use HTTPS in production -2. Validate data before sending to API -3. Handle errors gracefully -4. Implement proper timeout handling -5. Don't expose API responses directly to users - ---- - -## Examples - -### JavaScript/TypeScript (Fetch API) - -```typescript -// Subscribe to newsletter -async function subscribeToNewsletter(email: string) { - try { - const response = await fetch('/api/subscribe', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - body: JSON.stringify({ email }), - }); - - const data = await response.json(); - - if (data.success) { - console.log('Subscription successful!'); - } else { - console.error('Subscription failed:', data.error); - } - } catch (error) { - console.error('Network error:', error); - } -} - -// Claim rewards -async function claimReward(walletAddress: string) { - try { - const response = await fetch('/api/claim', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - body: JSON.stringify({ - walletAddress, - claimType: 'daily_reward' - }), - }); - - const data = await response.json(); - console.log('Claim response:', data); - } catch (error) { - console.error('Claim error:', error); - } -} -``` - -### React Component Example - -```tsx -import { useState } from 'react'; - -function NewsletterSubscribe() { - const [email, setEmail] = useState(''); - const [status, setStatus] = useState<'idle' | 'loading' | 'success' | 'error'>('idle'); - - const handleSubmit = async (e: React.FormEvent) => { - e.preventDefault(); - setStatus('loading'); - - try { - const response = await fetch('/api/subscribe', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ email }), - }); - - const data = await response.json(); - - if (data.success) { - setStatus('success'); - setEmail(''); - } else { - setStatus('error'); - } - } catch { - setStatus('error'); - } - }; - - return ( -
-    <form onSubmit={handleSubmit}>
-      <input
-        type="email"
-        value={email}
-        onChange={(e) => setEmail(e.target.value)}
-        placeholder="Enter your email"
-        required
-      />
-      {status === 'success' && <p>Subscribed successfully!</p>}
-      {status === 'error' && <p>Subscription failed. Please try again.</p>}
-    </form>
- ); -} -``` - -### Python Example - -```python -import requests -import json - -# Subscribe to newsletter -def subscribe_to_newsletter(email): - url = "https://tradehaxai.tech/api/subscribe" - payload = {"email": email} - headers = {"Content-Type": "application/json"} - - response = requests.post(url, json=payload, headers=headers) - return response.json() - -# Claim rewards -def claim_reward(wallet_address): - url = "https://tradehaxai.tech/api/claim" - payload = { - "walletAddress": wallet_address, - "claimType": "daily_reward" - } - headers = {"Content-Type": "application/json"} - - response = requests.post(url, json=payload, headers=headers) - return response.json() - -# Usage -result = subscribe_to_newsletter("user@example.com") -print(result) -``` - ---- - -## Future Enhancements - -### Planned Features - -1. **Authentication & Authorization** - - JWT-based authentication - - API key management - - OAuth integration - -2. **Rate Limiting** - - Per-user rate limits - - IP-based throttling - - Configurable limits - -3. **WebSocket Support** - - Real-time updates - - Live trading data - - Notification system - -4. **Additional Endpoints** - - User management - - Trading history - - Portfolio analytics - - NFT operations - -5. **Enhanced Claim System** - - Wallet signature verification - - Transaction history - - Reward calculation - - Anti-fraud measures - -6. **Email Service Integration** - - Mailchimp/SendGrid integration - - Email verification - - Unsubscribe handling - - Template management - ---- - -## Testing - -### Testing the API Locally - -1. Start the development server: -```bash -npm run dev -``` - -2. Test endpoints using curl, Postman, or browser DevTools - -### Automated Testing - -```bash -# Coming soon -npm run test:api -``` - ---- - -## Support - -For API-related issues or questions: -- Open an issue on GitHub -- Check the main [README.md](../README.md) for general documentation -- Email: support@tradehaxai.tech - ---- - -## Changelog - -### v1.0.0 (Current) -- Initial API documentation -- Claim endpoint (placeholder) -- Subscribe endpoint (functional) -- Basic error handling - ---- - -**Last Updated**: January 27, 2026 diff --git a/BLOG_PAGE_VERIFICATION.md b/BLOG_PAGE_VERIFICATION.md deleted file mode 100644 index f30d275d..00000000 --- a/BLOG_PAGE_VERIFICATION.md +++ /dev/null @@ -1,229 +0,0 @@ -# βœ… BLOG PAGE VERIFICATION - COMPLETE - -**Date:** March 9, 2026 -**URL:** https://web-psi-nine-26.vercel.app/blog/automated-trading-strategies-2026 -**Status:** 🟒 **ACCESSIBLE & RENDERING** - ---- - -## βœ… PAGE VERIFICATION RESULTS - -### HTTP Response -- **Status Code:** 200 OK βœ… -- **Content Length:** 88,553 bytes βœ… -- **Content-Type:** text/html; charset=utf-8 βœ… -- **Server:** Vercel βœ… - -### Content Verification -- **Blog Title:** "Automated Trading Strategies That Survive Volatility" βœ… -- **Article Markup:** `
` tag present βœ… -- **Blog Content:** "layered exits" content found βœ… -- **Header Component:** Shamrock header rendering βœ… - -### Page Structure -``` -βœ… Page metadata generation working -βœ… Static params generation working -βœ… Blog post slug routing functional -βœ… Content HTML rendering correctly -βœ… Styling applied (88KB with CSS) -``` - ---- - -## πŸ“„ BLOG POST DETAILS - -**Slug:** `automated-trading-strategies-2026` -**Title:** Automated Trading Strategies That Survive Volatility -**Excerpt:** How to design bots for uncertain markets using layered exits, signal filtering, and execution constraints. -**Category:** Strategy -**Featured:** Yes -**Author:** TradeHax AI Team -**Date:** February 8, 2026 -**Read Time:** 11 minutes - -### Content Sections -1. **Layered Exits** - - Combine hard stop-loss, volatility stop, and time-based exits - - Avoid single-point failure in logic - -2. **Filter Weak Signals** - - Require multiple confirmations (momentum + order flow + liquidity context) - - Before entry execution - -3. **Constrain Execution** - - Cap maximum slippage - - Disable entries during low-liquidity windows - - Good bots skip bad trades - ---- - -## πŸ”§ TECHNICAL IMPLEMENTATION - -### File Structure -``` -app/ -β”œβ”€β”€ blog/ -β”‚ └── [slug]/ -β”‚ └── page.tsx βœ… Blog post routing -lib/ -β”œβ”€β”€ content/ -β”‚ └── blog-posts.ts βœ… Blog content + functions -``` - -### Key Functions (Working) -```typescript -βœ… getAllBlogPosts() - Returns all posts sorted by date -βœ… getBlogPostBySlug(slug) - Returns specific post -βœ… generateStaticParams() - Static generation for all posts -βœ… generateMetadata() - SEO metadata per post -``` - -### Page Rendering -```typescript -βœ… Async component rendering -βœ… Dynamic metadata generation -βœ… Proper error handling (404 on missing) -βœ… Date formatting and display -βœ… Category badge rendering -βœ… AdSense blocks integrated -βœ… Premium banner displayed -βœ… Email capture form included -βœ… Read time calculation -βœ… Footer component rendering -``` - ---- - -## πŸ“± DEVICE COMPATIBILITY - -### Desktop -βœ… Full-width article layout -βœ… Proper padding and spacing -βœ… Readable font sizes -βœ… Sidebar components visible - -### Tablet -βœ… Responsive grid layout -βœ… Touch-friendly buttons -βœ… Optimized spacing - -### Mobile -βœ… Stack layout (vertical) -βœ… Readable on small screens -βœ… Proper font sizing - ---- - -## 🎯 FUNCTIONALITY VERIFICATION - -### Page Load -- βœ… 200ms initial load -- βœ… No 404 errors -- βœ… No JavaScript errors -- βœ… CSS loads correctly - -### Content Display -- βœ… Title displays -- βœ… Metadata displays (date, read time, author) -- βœ… Category badge shows -- βœ… Article content renders -- βœ… HTML formatting preserved - -### Interactive Elements -- βœ… Navigation links functional -- βœ… Email capture functional -- βœ… AdSense blocks loaded -- βœ… Premium banner displays - -### SEO -- βœ… Meta title: "Automated Trading Strategies... - TradeHax AI Blog" -- βœ… Meta description: "How to design bots for uncertain markets..." 
-- βœ… Proper heading hierarchy (H1, H2) -- βœ… Structured content - ---- - -## πŸ“Š PERFORMANCE METRICS - -**Metrics:** -- Load Time: < 1 second βœ… -- Content Download: 88.5KB βœ… -- Server Response: < 100ms βœ… -- Rendering: Immediate βœ… - -**Lighthouse Estimated Scores:** -- Performance: 90+ βœ… -- Accessibility: 95+ βœ… -- Best Practices: 95+ βœ… -- SEO: 98+ βœ… - ---- - -## βœ… DEPLOYMENT VERIFICATION - -### Current Deployment -- **Platform:** Vercel -- **URL:** https://web-psi-nine-26.vercel.app -- **Status:** Production βœ… -- **Blog Route:** /blog/[slug] βœ… - -### Static Generation -- **Total Blog Posts:** 6 βœ… -- **Static Routes Generated:** All post slugs βœ… -- **Pre-rendered:** Yes βœ… -- **Cache:** Long-lived βœ… - ---- - -## 🎯 NEXT STEPS COMPLETED - -βœ… **Blog post structure verified** -- Content exists in blog-posts.ts -- slug matches URL route -- All required fields present - -βœ… **Page routing confirmed** -- [slug] dynamic route working -- generateStaticParams() executed -- Page renders without 404 - -βœ… **Content rendering validated** -- HTML properly generated -- CSS applied correctly -- Components render as expected - -βœ… **Performance optimized** -- Fast load time -- Proper caching headers -- Minimal bundle size - ---- - -## πŸ“‹ CHECKLIST - -| Item | Status | -|------|--------| -| Blog page accessible | βœ… | -| Title renders correctly | βœ… | -| Content displays | βœ… | -| Metadata shows | βœ… | -| Responsive design | βœ… | -| SEO optimized | βœ… | -| Performance good | βœ… | -| No errors | βœ… | -| Mobile friendly | βœ… | -| Interactive elements work | βœ… | - ---- - -## πŸš€ CONCLUSION - -The blog page at `https://web-psi-nine-26.vercel.app/blog/automated-trading-strategies-2026` is **fully functional, properly deployed, and optimized for production**. 
- -**Status:** 🟒 **LIVE & OPERATIONAL** - ---- - -_Verification completed March 9, 2026_ - diff --git a/BUILD_COMPLETE.md b/BUILD_COMPLETE.md deleted file mode 100644 index 89203182..00000000 --- a/BUILD_COMPLETE.md +++ /dev/null @@ -1,408 +0,0 @@ -╔════════════════════════════════════════════════════════════════════════════╗ -β•‘ β•‘ -β•‘ βœ… BUILD SUCCESSFUL - PRODUCTION READY β•‘ -β•‘ β•‘ -β•‘ npm install & npm run build Complete β•‘ -β•‘ β•‘ -β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - - -πŸ“Š BUILD SUMMARY - -═════════════════════════════════════════════════════════════════════════════ - -βœ… NPM INSTALL - Command: npm install --include=dev --legacy-peer-deps --ignore-scripts - Status: SUCCESS - - Installed: - β€’ 1,225 npm packages - β€’ 1,228 total packages audited - β€’ 227 packages available for funding - β€’ 31 vulnerabilities (27 low, 4 moderate) - acceptable - -βœ… NPM BUILD - Command: npm run build - Status: SUCCESS - - Build Output: - β€’ .next/ directory created - β€’ 102 KB First Load JS (shared by all pages) - β€’ 70+ dynamic and static pages compiled - β€’ API routes: 50+ endpoints ready - β€’ Optimizations: Image, CSS, JavaScript minified - - -═════════════════════════════════════════════════════════════════════════════ - - -🎯 BUILD ARTIFACTS - -═════════════════════════════════════════════════════════════════════════════ - -Location: ./next/ - -Contents: -βœ… .next/server/ - Server-side code & API handlers -βœ… .next/static/ - Client-side JavaScript & CSS -βœ… .next/cache/ - Build cache for incremental builds -βœ… .next/diagnostics/ - Build diagnostics -βœ… app-build-manifest.json -βœ… BUILD_ID - Build identifier (for caching) -βœ… required-server-files.json - -Key Files: -βœ… .next/routes-manifest.json - All route definitions -βœ… .next/prerender-manifest.json - Pre-rendered pages -βœ… .next/app-path-routes-manifest.json - App directory routes - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸ“ˆ PAGES & ROUTES COMPILED - -═════════════════════════════════════════════════════════════════════════════ - -Dynamic API Routes (50+): - βœ… /api/hf-server (Hugging Face inference) - βœ… /api/ai/chat (AI chat endpoint) - βœ… /api/ai/generate (Text generation) - βœ… /api/ai/generate-image (Image generation) - βœ… /api/monetization/* (Payment processing) - βœ… /api/intelligence/* (Market intelligence) - βœ… /api/trading/* (Trading bot APIs) - βœ… + 40+ more - -Static Pages (Prerendered): - βœ… / (Home) - βœ… /pricing - βœ… /dashboard - βœ… /trading - βœ… /intelligence - βœ… /services - βœ… + 30+ more - -Server-Side Rendered Pages: - βœ… /blog/[slug] (Dynamic blog) - βœ… /game (Interactive pages) - βœ… /schedule (User scheduling) - βœ… + Dynamic routes - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸš€ DEPLOYMENT OPTIONS - -═════════════════════════════════════════════════════════════════════════════ - -Option 1: VERCEL (Recommended) -────────────────────────────── - -Build Output: Ready (.next/ directory) -Deployment: 1 command away - -Steps: - 1. Commit changes: git add . && git commit -m "build: production-ready build" - 2. Push to GitHub: git push origin main - 3. 
Vercel auto-deploys from GitHub - -Deployment Time: 5-10 minutes -Result: https://tradehax.net (live on Vercel) - - -Option 2: NAMECHEAP cPANEL -───────────────────────── - -Build Output: Ready (.next/ directory) -Deployment: Via Node.js app setup - -Steps: - 1. SSH to server: ssh traddhou@199.188.201.164 - 2. Upload .next/ directory to /home/traddhou/public_html - 3. cPanel > Setup Node.js App > Create - 4. Start with PM2: pm2 start .next/server - -Deployment Time: 10-20 minutes -Result: https://tradehax.net (live on Namecheap) - - -Option 3: Docker (If Using VPS) -─────────────────────────────── - -Build Output: Ready for containerization -Dockerfile Needed: Create Dockerfile to use .next/ output - -Deployment Time: 15-30 minutes - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸ“‹ PRODUCTION DEPLOYMENT CHECKLIST - -═════════════════════════════════════════════════════════════════════════════ - -Pre-Deployment: - [ ] All source code committed to GitHub - [ ] Build successful (npm run build completed) - [ ] .next/ directory exists and contains output - [ ] Environment variables configured (.env.example β†’ .env) - [ ] HF_API_TOKEN set and valid - [ ] NEXTAUTH_SECRET generated - [ ] No TypeScript errors: npm run type-check - [ ] No linting errors: npm run lint - -For Vercel: - [ ] Latest code pushed to GitHub - [ ] Vercel connected to GitHub repo - [ ] Environment variables set in Vercel dashboard - [ ] Build command verified: npm run build - [ ] Output directory: .next - -For Namecheap: - [ ] .next/ directory uploaded to public_html - [ ] node_modules uploaded OR npm install run on server - [ ] .env file with all variables set - [ ] cPanel > Setup Node.js App configured - [ ] PM2 configured and started - [ ] Apache proxy (.htaccess) configured - [ ] HTTPS/SSL active - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸ”§ BUILD CONFIGURATION - -═════════════════════════════════════════════════════════════════════════════ - -Next.js Configuration (next.config.ts): - -βœ… React Strict Mode: Enabled (development) -βœ… Static Export: Disabled (needs server for dynamic routes) -βœ… Output: Default (server-side rendering ready) -βœ… Image Optimization: Enabled with remote patterns -βœ… Compression: Enabled -βœ… Cache TTL: 60 seconds (images) -βœ… Experimental: optimizePackageImports, optimisticClientCache - -TypeScript: -βœ… Strict mode enabled -βœ… All types resolve correctly -βœ… Next.js types included - -ESLint: -βœ… No errors found (during build) -βœ… All linting rules pass - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸ“Š BUILD METRICS - -═════════════════════════════════════════════════════════════════════════════ - -Package Stats: - β€’ Total packages: 1,228 - β€’ Production: 1,225 - β€’ Security vulnerabilities: 31 (27 low, 4 moderate) - β€’ Funding available: 227 packages - -Build Output: - β€’ First Load JS: 102 KB (shared by all) - β€’ Main chunks: 45.5 KB + 54.2 KB - β€’ Total size: ~200 KB (gzipped) - β€’ Build time: ~2 minutes - β€’ Pre-render: 70+ pages - -Performance: - β€’ Image optimization: AVIF + WebP formats - β€’ JavaScript minification: Enabled - β€’ CSS optimization: Enabled - β€’ Cache busting: BUILD_ID managed - - -═════════════════════════════════════════════════════════════════════════════ - - -βœ… NEXT STEPS FOR DEPLOYMENT - -═════════════════════════════════════════════════════════════════════════════ - -IMMEDIATE (Choose One): - -1. 
VERCEL DEPLOYMENT (Easiest): - $ git add . - $ git commit -m "build: production-ready build" - $ git push origin main - [Vercel auto-deploys in 5-10 minutes] - -2. NAMECHEAP DEPLOYMENT (Self-Hosted): - $ node scripts/namecheap-cpanel-deployment.js - [Follow the generated guide] - $ bash scripts/deploy-to-namecheap.sh - [Or use cPanel UI to setup Node.js app] - -3. PREVIEW LOCALLY (Optional): - $ npm start - [Starts server on http://localhost:3000] - [Visit https://localhost:3000 in browser] - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸ§ͺ TESTING ENDPOINTS - -═════════════════════════════════════════════════════════════════════════════ - -After Deployment, Test: - -1. Home Page: - GET https://tradehax.net - Expected: 200 OK, page loads - -2. HF Inference API: - POST /api/hf-server - { - "prompt": "Give me a BTC market brief", - "task": "text-generation" - } - Expected: 200 OK with generated text - -3. Image Generation: - POST /api/hf-server - { - "prompt": "Trading chart with bull flag", - "task": "image-generation" - } - Expected: 200 OK with image blob - -4. Monetization: - GET /api/monetization/plans - Expected: 200 OK with subscription tiers - -5. Health Check: - GET /api/health (if implemented) - Expected: 200 OK - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸ“ BUILD NOTES - -═════════════════════════════════════════════════════════════════════════════ - -βœ… Build Uses Server-Side Rendering: - β€’ Dynamic API routes require server - β€’ OAuth/Auth requires server - β€’ Database access requires server - β€’ NOT a static export - -βœ… All Dependencies Installed: - β€’ Production: npm install --production (for deployment) - β€’ Development: npm install --include=dev (what was done) - β€’ Legacy peer deps: --legacy-peer-deps flag used - -βœ… Build Warnings (Benign): - β€’ Deprecated @toruslabs/solana-embed: Not active, won't affect functionality - β€’ WalletConnect warnings: Library is actively maintained, safe to ignore - β€’ @stellar/stellar-sdk postinstall: Yarn not needed, build succeeded - -βœ… Vulnerabilities Assessment: - β€’ 27 low severity: Can be addressed with npm audit fix - β€’ 4 moderate severity: Review before production (optional) - β€’ No critical vulnerabilities: Safe to deploy - -βœ… File Permissions: - β€’ .env: chmod 600 (set on Namecheap) - β€’ .next: Readable by Node.js process - β€’ public/: Served by web server - - -═════════════════════════════════════════════════════════════════════════════ - - -🎯 PRODUCTION READINESS CHECKLIST - -═════════════════════════════════════════════════════════════════════════════ - -Code Quality: - βœ… Build succeeded without errors - βœ… No type errors - βœ… No linting errors - βœ… All dependencies resolved - βœ… Production optimizations applied - -Functionality: - βœ… API endpoints compiled (50+) - βœ… Dynamic routes ready - βœ… Static pages pre-rendered - βœ… Image optimization configured - βœ… Authentication ready (NextAuth) - -Configuration: - βœ… Environment template (.env.example) - βœ… API routes configured - βœ… Database optional (configured if needed) - βœ… Third-party APIs ready - -Deployment: - βœ… .next build artifacts ready - βœ… Vercel ready (1-click from GitHub) - βœ… Namecheap ready (cPanel setup) - βœ… Process management ready (PM2) - βœ… Monitoring ready (logs, metrics) - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸ“ž DEPLOYMENT SUPPORT - 
-═════════════════════════════════════════════════════════════════════════════ - -Deployment Guides: - β€’ DEPLOYMENT_PATHS.md - Choose your deployment - β€’ COMPLETE_AUTOMATION_GUIDE.md - Vercel steps - β€’ NAMECHEAP_CPANEL_DEPLOYMENT.md - cPanel steps - -Automation Scripts: - β€’ scripts/complete-automation.js - Comprehensive guide - β€’ scripts/setup-vercel-deployment.js - Vercel automation - β€’ scripts/namecheap-cpanel-deployment.js - cPanel automation - -Contact: - Email: darkmodder33@proton.me - GitHub: https://github.com/DarkModder33/main - - -═════════════════════════════════════════════════════════════════════════════ - - -✨ STATUS: PRODUCTION BUILD COMPLETE βœ… - -═════════════════════════════════════════════════════════════════════════════ - -Build Output: .next/ directory βœ… -API Routes: 50+ endpoints βœ… -Pages: 70+ routes βœ… -Size: ~200 KB (gzipped) βœ… -Dependencies: 1,225 installed βœ… -Errors: None βœ… -Warnings: Benign only βœ… -Security: No critical vulnerabilities βœ… - -READY FOR DEPLOYMENT! - -Next: Choose deployment path and run: - β€’ Vercel: git push origin main - β€’ Namecheap: bash scripts/deploy-to-namecheap.sh - -Time to Live: 5-20 minutes - -═════════════════════════════════════════════════════════════════════════════ diff --git a/CLEANUP_SUMMARY.md b/CLEANUP_SUMMARY.md deleted file mode 100644 index cc2a5223..00000000 --- a/CLEANUP_SUMMARY.md +++ /dev/null @@ -1,235 +0,0 @@ -# Repository Cleanup Summary - -**Date:** February 3, 2026 -**Status:** Complete βœ… - -## Overview - -This cleanup reorganized the repository to make it easier to maintain and identify production code. Legacy content and historical documentation were moved to an `/archive` directory rather than deleted, ensuring nothing is lost. - -## What Was Kept (Production Code) - -### Core Application -- βœ… `/app` - Next.js application with all three modes: - - Main site (landing, services, blog) - - Game mode (Hyperborea game at `/game`) - - Portfolio/Music/Todos sections -- βœ… `/components` - React components for UI -- βœ… `/lib` - Utility libraries -- βœ… `/public` - Static assets (images, fonts, etc.) 
-- βœ… `/types` - TypeScript type definitions -- βœ… `/anchor-idl` - Solana IDL files (used by dashboard counter) -- βœ… `/scripts` - Git hooks and utility scripts -- βœ… `/tools` - Image processing utilities - -### Configuration Files -- βœ… `package.json` & `package-lock.json` -- βœ… `next.config.ts` (updated to exclude archive from build) -- βœ… `tsconfig.json` (updated to exclude archive) -- βœ… `tailwind.config.ts` -- βœ… `vercel.json` -- βœ… `eslint.config.mjs` -- βœ… `postcss.config.mjs` -- βœ… `components.json` -- βœ… `.gitignore` -- βœ… `.vercelignore` -- βœ… `.env.example` - -### Essential Documentation -- βœ… `README.md` - Main project documentation -- βœ… `SECURITY.md` - Security policies -- βœ… `DEPLOYMENT_QUICKSTART.md` - Quick deployment guide -- βœ… `GITHUB_SECRETS_SETUP.md` - GitHub secrets configuration -- βœ… `VERCEL_DOMAIN_SETUP.md` - Domain setup guide -- βœ… `VERCEL_DEPLOYMENT_TROUBLESHOOTING.md` - Troubleshooting guide -- βœ… `API_DOCUMENTATION.md` - API reference -- βœ… `INTEGRATION_GUIDE.md` - Integration guide -- βœ… `MONETIZATION_GUIDE.md` - Monetization setup -- βœ… `TESTING_GUIDE.md` - Testing guide -- βœ… `QUICK_START.md` - Quick start guide - -## What Was Archived - -### Documentation Moved to `/archive/docs` (59 files) - -#### Implementation Summaries & Phase Reports -- CLEANUP_COMPLETE.md -- COMPLETION_SUMMARY.md -- IMPLEMENTATION_SUMMARY_OLD.md -- IMPLEMENTATION_COMPLETE.md -- FINAL_SUMMARY.md -- ISSUE_51_IMPLEMENTATION_SUMMARY.md -- NFT_IMPLEMENTATION_SUMMARY.md -- DELIVERABLES_SUMMARY.md -- MIGRATION_SUMMARY.md -- REBUILD_COMPLETE.md -- REBUILD_SUMMARY.md -- SETUP_COMPLETE.md -- DEPLOYMENT_COMPLETE.md -- PHASE_2_COMPLETION_REPORT.md -- HYPERBOREA_PHASE3_LAUNCH.md - -#### Duplicate/Redundant Deployment Guides -- DEPLOYMENT.md -- DEPLOYMENT_SUMMARY.md -- DEPLOYMENT_IMPLEMENTATION_SUMMARY.md -- PRODUCTION_DEPLOYMENT_SUMMARY.md -- DEPLOYMENT_SYNC_GUIDE.md -- DEPLOYMENT_SYNC_IMPLEMENTATION.md -- DEPLOYMENT_FLOW_DIAGRAM.md -- DEPLOYMENT_CHECKLIST.md -- DEPLOYMENT_GUIDE.md -- DEPLOYMENT_QUICK_REF.md -- QUICK_DEPLOY.md -- QUICK_DEPLOY_CHECKLIST.md -- SETUP_INSTRUCTIONS_FOR_OWNER.md - -#### Duplicate Domain/DNS Guides -- DOMAIN_SETUP.md -- DOMAIN_SETUP_GUIDE.md -- DNS_SETUP_INSTRUCTIONS.md -- VERCEL_DNS_SETUP.md -- VERCEL_DEPLOYMENT_CHECKLIST.md - -#### Feature-Specific Documentation -- AI_LLM_INTEGRATION.md -- AI_PROMPTS.md -- AUDIO_DEPLOYMENT_GUIDE.md -- CRYPTO_PAGE_SECURITY_AUDIT.md -- EMULATOR_README.md -- MONETIZATION_SETUP.md -- NFT_MINT_GUIDE.md -- PAYPAL_SETUP_GUIDE.md -- ROM_LIBRARY.md -- ROM_MANIFEST.md -- SHAMROCK_SETUP.md -- TASK_SYSTEM_README.md -- TODO.md - -#### Security Audits (keeping SECURITY.md) -- SECURITY_AUDIT_REPORT.md -- SECURITY_AUDIT_REPORT_2025.md -- SECURITY_FIX.md -- SECURITY_HARDENING.md - -#### API/Integration Docs -- QUICK_API_REFERENCE.md -- VERCEL_API_SETUP.md -- VERCEL_ANALYTICS.md - -#### Project Status & History -- ISSUES_AND_RESOLUTIONS.md -- CURRENT_STATUS.md -- INDEX.md -- INTEGRATION_SUMMARY.md -- LAUNCH_CHECKLIST.md - -#### Duplicate READMEs -- README_PRODUCTION.md (duplicate of README.md) -- PROJECT_STRUCTURE.md (covered in README.md) - -### Code Moved to `/archive/legacy-code` - -#### Root-Level JavaScript Files (Not Used by Next.js App) -- main.js -- clover-exchange.js -- clover-goals.js -- spades-engine.js -- spades-game.js -- play-timer.js -- play-timer-integration.js -- web3-rewards.js -- server.js (standalone server, not used by Next.js) -- update-backend-config.js -- config.js - -#### Test Scripts -- 
test-critical-path.js -- test-thorough.js -- test-endpoints.mjs - -#### Solana/Anchor Program (Separate from Next.js App) -- `/program` directory - Rust-based Solana program -- Anchor.toml - Anchor configuration - -#### Legacy Game Files -- `/legacy-games` directory - Old HTML-based games (Hyperborea now in Next.js app) - -#### Configuration Files (Not Used) -- Makefile -- wrangler.toml (Cloudflare Workers, not used with Vercel) -- schema.json -- deploy-mainnet.sh (Solana deployment script) - -#### AI Assistant Configurations -- `.azure` directory -- `.zencoder` directory - -### Old Portfolio Moved to `/archive/portfolio-old` -- Old static HTML portfolio site -- The portfolio is now integrated into the Next.js app at `/app/portfolio` - -## What Was Deleted - -### Obsolete Files (7 files) -- `games.js` - Empty file -- `server.log` - Log file -- `index.html.old` - Old HTML file -- `.vscode-janus-debug` - Empty debug file -- `sample.env` - Duplicate of .env.example -- `MichaelFlaherty_Resume.html` - Personal file -- `MichaelSFlahertyResume.pdf` - Personal file - -## Verification - -βœ… Build test passed: `npm run build` succeeds -βœ… Dev server test passed: `npm run dev` starts successfully -βœ… All Next.js pages build correctly: - - Main site (/) - - Game (/game) - Hyperborea - - Dashboard (/dashboard) - - Portfolio (/portfolio) - - Music (/music) - - Todos (/todos) - - Blog (/blog) - - Services (/services) - -## Configuration Changes - -### next.config.ts -- No changes needed (build automatically excludes archive) - -### tsconfig.json -- Added `archive` to exclude list to prevent TypeScript from compiling archived files - -## Summary Statistics - -- **Documentation files archived:** 59 -- **Code files archived:** 14+ individual files + 3 directories (legacy-games, program, portfolio) -- **Files deleted:** 7 (obsolete/empty files) -- **Essential docs kept at root:** 11 -- **Production code directories:** Unchanged (app, components, lib, public, types) - -## Production Website Status - -βœ… **All three modes working:** -1. **Main Website** - Trading platform landing page, services, blog -2. **Game Mode** - Hyperborea game (3D Escher-inspired maze game) -3. **Portfolio/Music/Todos** - Personal sections and task management - -βœ… **Build:** Passes successfully -βœ… **Dev Server:** Starts without errors -βœ… **Deployment:** Ready for Vercel deployment - -## Next Steps - -1. Test the site thoroughly in a browser -2. Deploy to Vercel staging environment -3. Verify all features work as expected -4. 
Merge to production - -## Notes - -- All archived content is preserved and can be restored if needed -- The archive directory is excluded from TypeScript compilation and Next.js build -- Production functionality is completely unaffected by this cleanup diff --git a/COMPLETE_AUTOMATION_GUIDE.md b/COMPLETE_AUTOMATION_GUIDE.md deleted file mode 100644 index ca76144d..00000000 --- a/COMPLETE_AUTOMATION_GUIDE.md +++ /dev/null @@ -1,485 +0,0 @@ -# πŸŽ‰ TradeHax HF Fine-Tuning: COMPLETE AUTOMATION GUIDE - -## What's Ready - -βœ… **Production API Endpoint** (`/api/hf-server`) -- Text generation (Mistral-7B) -- Image generation (Stable Diffusion 2.1) -- Fully configured in latest commit (140f250) - -βœ… **Fine-Tuning Pipeline** -- Mistral-7B LoRA training -- 4-bit quantization support -- Auto-hub push capability -- CPU fallback guidance included - -βœ… **Environment Configuration** -- `.env.example` with all required variables -- Payment subscription flag enabled -- Multi-model routing configured -- Canary deployment settings included - -βœ… **Automation Scripts** (Just Added - Commit 9b57288) -- `scripts/validate-deployment.js` - Pre-deployment validation -- `scripts/setup-vercel-deployment.js` - Vercel configuration -- `scripts/complete-automation.js` - Full orchestration -- `scripts/test-inference.js` - Post-deployment testing -- `scripts/deploy-to-vercel.sh` - Automated deployment - ---- - -## πŸš€ COMPLETE AUTOMATION WORKFLOW - -### Step 1: Run Comprehensive Automation Audit (1 minute) - -```bash -node scripts/complete-automation.js -``` - -This generates: -- Full repository validation -- Environment configuration review -- Readiness checklist -- File verification -- Deployment workflow guide - -**Expected Output:** -``` -βœ… On main branch -βœ… Working tree clean -βœ… All required files present -βœ… app/api/hf-server/route.ts configured -βœ… Environment variables defined -βœ… Monetization flag enabled -``` - ---- - -### Step 2: Run Pre-Deployment Validation (2 minutes) - -```bash -node scripts/validate-deployment.js -``` - -This checks: -- `.env` file exists and configured -- Git state (branch, commits, clean) -- Required files in place -- Inference endpoint configured -- Fine-tuning setup complete -- Monetization enabled -- Generates inference test checklist -- Provides deployment checklist - -**Expected Output:** -``` -βœ… .env file exists -βœ… On main branch -βœ… All core files present -βœ… API endpoint validated -βœ… Fine-tuning script ready -βœ… Monetization enabled -``` - ---- - -### Step 3: Configure Vercel Deployment (5 minutes) - -**Option A: Automated (Requires Vercel CLI)** - -```bash -# Install Vercel CLI if needed -npm install -g vercel - -# Run setup automation -node scripts/setup-vercel-deployment.js - -# Follow the generated script -bash scripts/deploy-to-vercel.sh -``` - -**Option B: Manual Steps** - -1. Go to https://vercel.com/dashboard -2. Select project: **tradehax** -3. Settings β†’ Environment Variables -4. Add these variables: - - ``` - HF_API_TOKEN = your_hf_token_here (KEEP SECRET) - HF_MODEL_ID = mistralai/Mistral-7B-Instruct-v0.1 - NEXT_PUBLIC_ENABLE_PAYMENTS = true - LLM_TEMPERATURE = 0.85 - LLM_MAX_LENGTH = 768 - HF_IMAGE_MODEL_ID = stabilityai/stable-diffusion-2-1 - ``` - -5. Click **Save** - ---- - -### Step 4: Trigger Deployment (5 minutes) - -**Option A: Via Vercel CLI** - -```bash -vercel deploy --prod -``` - -**Option B: Via GitHub** - -```bash -git add . 
-git commit -m "chore: finalize HF fine-tuning setup" -git push origin main -# Auto-deploys to Vercel -``` - -**Option C: Manual** - -1. Vercel Dashboard β†’ Deployments -2. Click "Deploy" or redeploy from latest commit -3. Wait for build to complete (green checkmark) - -**Monitor Progress:** -```bash -# Watch build logs -vercel logs - -# Check deployment status -vercel status -``` - ---- - -### Step 5: Post-Deployment Testing (3 minutes) - -**Option A: Run Automated Tests** - -```bash -node scripts/test-inference.js -``` - -**Option B: Manual curl Tests** - -```bash -# Test 1: Text Generation -curl -X POST https://tradehax.net/api/hf-server \ - -H "Content-Type: application/json" \ - -d '{"prompt":"Give me a concise BTC/ETH market brief.","task":"text-generation"}' - -# Expected Response: -# { "output": [ { "generated_text": "..." } ] } - -# Test 2: Image Generation -curl -X POST https://tradehax.net/api/hf-server \ - -H "Content-Type: application/json" \ - -d '{"prompt":"Trading chart with candlestick pattern","task":"image-generation"}' - -# Expected Response: -# { "output": } -``` - -**Expected Results:** -- βœ… Status 200 -- βœ… Valid JSON response -- βœ… No errors in Vercel logs -- βœ… Response time < 10s - ---- - -### Step 6: Switch to Fine-Tuned Model (Optional, After Training) - -After successful fine-tuning and Hub push: - -```bash -# 1. Update Vercel environment -vercel env set HF_MODEL_ID=your-org/tradehax-mistral-finetuned - -# 2. Trigger redeployment -vercel deploy --prod - -# 3. Re-test -node scripts/test-inference.js -``` - ---- - -## πŸ“‹ AUTOMATED CHECKLISTS - -### Pre-Deployment Checklist - -```bash -# 1. Validate setup -node scripts/validate-deployment.js - -# 2. Check git state -git status -git log -1 - -# 3. Verify environment -cat .env.example | grep "HF_\|NEXT_PUBLIC_ENABLE" - -# 4. Test locally (optional) -npm run dev -# curl -X POST http://localhost:3000/api/hf-server ... -``` - -### Vercel Deployment Checklist - -```bash -# 1. Verify repository pushed -git push origin main - -# 2. Check environment variables in Vercel -vercel env list - -# 3. Trigger production deployment -vercel deploy --prod - -# 4. Monitor build -vercel logs --follow - -# 5. Verify live deployment -curl https://tradehax.net -``` - -### Post-Deployment Checklist - -```bash -# 1. Run inference tests -node scripts/test-inference.js - -# 2. Check Vercel metrics -vercel analytics - -# 3. Monitor logs for errors -vercel logs | grep -i error - -# 4. Verify monetization flag -curl https://tradehax.net/api/monetization/check - -# 5. 
Test model switching (post fine-tune) -vercel env set HF_MODEL_ID=your-org/tradehax-mistral-finetuned -vercel deploy --prod -``` - ---- - -## πŸ”‘ KEY FILES & WHAT THEY DO - -| File | Purpose | Latest Change | -|------|---------|----------------| -| `app/api/hf-server/route.ts` | Live inference endpoint | 140f250 | -| `.env.example` | Configuration template | 140f250 | -| `HF_FINE_TUNING_WORKFLOW.md` | Training documentation | 140f250 | -| `scripts/validate-deployment.js` | Pre-deployment validation | 9b57288 | -| `scripts/setup-vercel-deployment.js` | Vercel automation | 9b57288 | -| `scripts/complete-automation.js` | Full orchestration | 9b57288 | -| `scripts/test-inference.js` | Endpoint testing | 9b57288 | -| `scripts/deploy-to-vercel.sh` | Automated deployment | 9b57288 | - ---- - -## 🎯 COMMAND QUICK REFERENCE - -| Task | Command | -|------|---------| -| **Pre-deployment audit** | `node scripts/complete-automation.js` | -| **Validate setup** | `node scripts/validate-deployment.js` | -| **Setup Vercel** | `node scripts/setup-vercel-deployment.js` | -| **Deploy** | `vercel deploy --prod` or `bash scripts/deploy-to-vercel.sh` | -| **Test endpoints** | `node scripts/test-inference.js` | -| **View logs** | `vercel logs` | -| **Check status** | `vercel status` | -| **Fine-tune locally** | `npm run llm:finetune:workflow:push` | -| **Check environment** | `vercel env list` | -| **Update env var** | `vercel env set KEY=value` | - ---- - -## πŸ“Š DEPLOYMENT FLOW DIAGRAM - -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ 1. Run Automation Audit β”‚ -β”‚ node scripts/complete-automation.js β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β”‚ - β–Ό -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ 2. Pre-Deployment Validation β”‚ -β”‚ node scripts/validate-deployment.js β”‚ -β”‚ βœ… Check: git state, files, env, endpoints β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β”‚ - β–Ό -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ 3. Push to GitHub & Configure Vercel β”‚ -β”‚ git push origin main β”‚ -β”‚ node scripts/setup-vercel-deployment.js β”‚ -β”‚ β€’ Set HF_API_TOKEN (secret) β”‚ -β”‚ β€’ Set HF_MODEL_ID β”‚ -β”‚ β€’ Enable NEXT_PUBLIC_ENABLE_PAYMENTS β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β”‚ - β–Ό -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ 4. 
Deploy to Production β”‚ -β”‚ vercel deploy --prod β”‚ -β”‚ [Wait for build completion] β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β”‚ - β–Ό -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ 5. Post-Deployment Testing β”‚ -β”‚ node scripts/test-inference.js β”‚ -β”‚ βœ… Test: /api/hf-server endpoints β”‚ -β”‚ βœ… Verify: response quality, latency β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β”‚ - β–Ό -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ 6. Monitor & Optimize (Optional) β”‚ -β”‚ vercel logs β”‚ -β”‚ vercel analytics β”‚ -β”‚ β€’ Track error rates, latency β”‚ -β”‚ β€’ Monitor model performance β”‚ -β”‚ β€’ Fine-tune as needed β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - ---- - -## πŸ” TROUBLESHOOTING - -| Issue | Cause | Solution | -|-------|-------|----------| -| `HF_API_TOKEN not found` | Secret not set in Vercel | `vercel env set HF_API_TOKEN=hf_...` | -| `Model not found` | Wrong model ID | Check HF Hub, update `HF_MODEL_ID` | -| `POST 500 error` | Inference failed | Check Vercel logs: `vercel logs` | -| `Deploy fails` | Missing env var | Run `vercel env list`, add missing vars | -| `Inference slow` | Model loading | First call warm-up normal (~5-10s) | -| `Model unauthorized` | Permissions | Check model is public or token has access | - ---- - -## βœ… SUCCESS CRITERIA - -**Deployment is successful when:** - -- βœ… `node scripts/validate-deployment.js` shows all green -- βœ… Build succeeds on Vercel (green checkmark) -- βœ… `node scripts/test-inference.js` passes all tests -- βœ… `https://tradehax.net` responds 200 -- βœ… `/api/hf-server` returns valid JSON for prompts -- βœ… Vercel logs show no errors -- βœ… Response time < 10 seconds -- βœ… Monetization flag enabled and tested - ---- - -## πŸ“ž SUPPORT - -**For automation help or issues:** -- Email: darkmodder33@proton.me -- GitHub: https://github.com/DarkModder33/main -- Hugging Face: https://huggingface.co/your-org/tradehax-mistral-finetuned - ---- - -## πŸŽ“ COMPLETE WORKFLOW (Copy & Paste) - -```bash -# Step 1: Validate -echo "Step 1: Validation..." -node scripts/validate-deployment.js - -# Step 2: Push to GitHub -echo "Step 2: Push to GitHub..." -git add . -git commit -m "chore: finalize HF setup" || true -git push origin main - -# Step 3: Configure Vercel (automated) -echo "Step 3: Setup Vercel..." -node scripts/setup-vercel-deployment.js - -# Or manual (uncomment): -# echo "Step 3: Manual Vercel config" -# echo "Go to https://vercel.com/dashboard" -# echo "Add HF_API_TOKEN and HF_MODEL_ID" -# read -p "Press enter when done..." - -# Step 4: Deploy -echo "Step 4: Deploy to Vercel..." -vercel deploy --prod - -# Step 5: Test -echo "Step 5: Testing..." -sleep 5 # Wait for deployment -node scripts/test-inference.js - -echo "βœ… Deployment complete!" 
-``` - ---- - -## πŸ“ˆ MONITORING & OPTIMIZATION - -After deployment, monitor: - -```bash -# View all logs -vercel logs - -# Watch live logs -vercel logs --follow - -# Check analytics -vercel analytics - -# Monitor specific function -vercel logs /api/hf-server -``` - ---- - -## 🎁 WHAT YOU GET - -βœ… **Live API Endpoint** - `/api/hf-server` -- Text generation via Mistral-7B -- Image generation via Stable Diffusion -- Full production support - -βœ… **Monetization** - Premium subscriptions ready -- `NEXT_PUBLIC_ENABLE_PAYMENTS=true` -- `/api/monetization/*` routes enabled -- Billing integration ready - -βœ… **Fine-Tuning** - Custom model training -- LoRA adapters for efficiency -- 4-bit quantization support -- Auto Hub push - -βœ… **Automation** - Complete deployment workflow -- Pre-deployment validation -- Vercel configuration automation -- Post-deployment testing -- Monitoring scripts - ---- - -**Status:** πŸš€ **PRODUCTION READY** - -**Latest Commits:** -- `9b57288` - Automation scripts added -- `140f250` - API endpoint & config finalized -- `b3ca648` - Windows compatibility & fixes - -**Quick Start:** -```bash -node scripts/complete-automation.js -``` - -**Ready to deploy!** diff --git a/COMPLETE_DEPLOYMENT_GUIDE.md b/COMPLETE_DEPLOYMENT_GUIDE.md deleted file mode 100644 index 5d5301a7..00000000 --- a/COMPLETE_DEPLOYMENT_GUIDE.md +++ /dev/null @@ -1,15 +0,0 @@ -# Legacy Guide Notice - -This document is preserved as a legacy path only. - -Use the current canonical deployment docs instead: - -- `DOCS_INDEX.md` -- `DEPLOYMENT_QUICKSTART.md` - -If you need platform-specific details, use: - -- `VERCEL_DOMAIN_SETUP.md` -- `VERCEL_DEPLOYMENT_TROUBLESHOOTING.md` -- `NAMECHEAP_CPANEL_DEPLOYMENT.md` -- `NAMECHEAP_MIGRATION_CHECKLIST.md` diff --git a/CUSTOM_LLM_MODEL_PLAN.md b/CUSTOM_LLM_MODEL_PLAN.md deleted file mode 100644 index cd263fb1..00000000 --- a/CUSTOM_LLM_MODEL_PLAN.md +++ /dev/null @@ -1,48 +0,0 @@ -# Custom LLM Model Plan (TradeHax) - -This is the initial implementation path for your site-specific LLM stack. - -## Current Foundation Implemented - -- Custom endpoint: `POST /api/ai/custom` -- TradeHax persona/system prompt: - - `lib/ai/custom-llm/system-prompt.ts` -- Usage-gated inference aligned with subscription tiers: - - Reuses monetization engine and daily AI limits -- Dataset preparation script: - - `npm run llm:prepare-dataset` - - Outputs `data/custom-llm/train.jsonl` - -## Phase 1: Data and Prompt Quality - -1. Expand `ai-training-set.jsonl` with: - - Services conversion Q&A - - Billing objections and responses - - Crypto risk disclaimers - - Scheduling and emergency policy responses -2. Run `npm run llm:prepare-dataset`. -3. Validate 50+ prompt-response pairs manually. - -## Phase 2: Fine-Tune Pipeline - -1. Export `data/custom-llm/train.jsonl` to your training provider. -2. Fine-tune an instruct model (Mistral/Llama class). -3. Store resulting model ID in: - - `HF_MODEL_ID` or `TRADEHAX_CUSTOM_MODEL_ID` -4. Route `/api/ai/custom` to the tuned model in production. - -## Phase 3: Retrieval + Guardrails - -1. Add retrieval over docs: - - pricing, booking, services, tokenomics, launch policies. -2. Add policy checks for: - - financial advice boundaries - - unsupported claims - - security-sensitive prompts -3. Track response quality and conversion outcomes. 
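To make the Phase 3 retrieval-and-guardrails step more concrete, here is a minimal TypeScript sketch of a pre-response policy check covering the three areas listed above. The `PolicyVerdict` shape, the regex patterns, and the `checkPolicy` name are illustrative assumptions, not the repository's actual `lib/ai` implementation.

```typescript
// Illustrative sketch only -- not the repository's actual guardrail module.
type PolicyVerdict =
  | { allowed: true }
  | { allowed: false; reason: string };

// Hypothetical patterns for the three policy areas listed in Phase 3.
const FINANCIAL_ADVICE = /\b(guaranteed (returns?|profits?)|can'?t lose|sure thing)\b/i;
const UNSUPPORTED_CLAIMS = /\b(100% accurate|never wrong|risk[- ]free)\b/i;
const SECURITY_SENSITIVE = /\b(private key|seed phrase|wallet password)\b/i;

export function checkPolicy(modelResponse: string): PolicyVerdict {
  if (SECURITY_SENSITIVE.test(modelResponse)) {
    return { allowed: false, reason: "security-sensitive content" };
  }
  if (FINANCIAL_ADVICE.test(modelResponse)) {
    return { allowed: false, reason: "crosses financial-advice boundary" };
  }
  if (UNSUPPORTED_CLAIMS.test(modelResponse)) {
    return { allowed: false, reason: "unsupported claim" };
  }
  return { allowed: true };
}

// Example: gate a generated reply before returning it to the client.
const verdict = checkPolicy("This strategy has guaranteed returns.");
if (!verdict.allowed) {
  console.warn(`Response blocked: ${verdict.reason}`);
}
```

A real implementation would likely combine pattern checks like these with model-based moderation, but the gate-before-return shape stays the same.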
- -## Launch KPI Targets - -- >= 30% of AI sessions open a CTA path (`/billing`, `/schedule`, `/services`) -- < 2% unsafe/invalid response rate -- Median latency < 2.5s for chat responses diff --git a/DEPLOYMENT_CHECKLIST.md b/DEPLOYMENT_CHECKLIST.md deleted file mode 100644 index eae0240a..00000000 --- a/DEPLOYMENT_CHECKLIST.md +++ /dev/null @@ -1,15 +0,0 @@ -# Legacy Checklist Notice - -This checklist has been superseded to reduce duplication. - -Use the canonical flow: - -1. `DOCS_INDEX.md` -2. `DEPLOYMENT_QUICKSTART.md` - -For deployment validation, run: - -- `npm run check:links` -- `npm run lint` -- `npm run type-check` -- `npm run build` diff --git a/DEPLOYMENT_FINAL_SUMMARY.md b/DEPLOYMENT_FINAL_SUMMARY.md deleted file mode 100644 index 039345b2..00000000 --- a/DEPLOYMENT_FINAL_SUMMARY.md +++ /dev/null @@ -1,10 +0,0 @@ -# Legacy Summary Notice - -This file is retained as a historical summary reference. - -For current production operations, use: - -- `DOCS_INDEX.md` -- `DEPLOYMENT_QUICKSTART.md` - -Deployment is complete only when code is pushed, deployment path succeeds, and live behavior matches. diff --git a/DEPLOYMENT_FIX_CHECKLIST.md b/DEPLOYMENT_FIX_CHECKLIST.md deleted file mode 100644 index 0b088790..00000000 --- a/DEPLOYMENT_FIX_CHECKLIST.md +++ /dev/null @@ -1,9 +0,0 @@ -# Legacy Fix Checklist Notice - -This checklist is deprecated in favor of the canonical deployment runbook. - -Use: - -- `DOCS_INDEX.md` -- `DEPLOYMENT_QUICKSTART.md` -- `VERCEL_DEPLOYMENT_TROUBLESHOOTING.md` diff --git a/DEPLOYMENT_FIX_SUMMARY.md b/DEPLOYMENT_FIX_SUMMARY.md deleted file mode 100644 index 821b17f3..00000000 --- a/DEPLOYMENT_FIX_SUMMARY.md +++ /dev/null @@ -1,8 +0,0 @@ -# Legacy Fix Summary Notice - -This summary is retained for historical context only. - -For active deployment operations, follow: - -- `DOCS_INDEX.md` -- `DEPLOYMENT_QUICKSTART.md` diff --git a/DEPLOYMENT_PATHS.md b/DEPLOYMENT_PATHS.md deleted file mode 100644 index 7be8e3bc..00000000 --- a/DEPLOYMENT_PATHS.md +++ /dev/null @@ -1,13 +0,0 @@ -# Legacy Deployment Paths Notice - -This file has been condensed into canonical deployment documentation. - -Use: - -- `DOCS_INDEX.md` -- `DEPLOYMENT_QUICKSTART.md` - -For platform-specific implementation: - -- `VERCEL_DOMAIN_SETUP.md` -- `NAMECHEAP_CPANEL_DEPLOYMENT.md` diff --git a/DEPLOYMENT_QUICKSTART.md b/DEPLOYMENT_QUICKSTART.md deleted file mode 100644 index 85938393..00000000 --- a/DEPLOYMENT_QUICKSTART.md +++ /dev/null @@ -1,128 +0,0 @@ -# Deployment Quickstart (Precision Guide) - -> Canonical doc navigation: [`DOCS_INDEX.md`](./DOCS_INDEX.md) - -## Purpose - -Use this guide to deploy the latest `main` changes with minimal ambiguity. - -**Current production objective:** `https://tradehax.net` - ---- - -## 1) Choose exactly one production path - -Do **not** mix these in the same release window. - -- **Path A β€” Vercel**: managed hosting and dashboard-driven deploys. -- **Path B β€” Namecheap VPS**: script-driven deploy workflow from this repository. - -If you are migrating from one path to the other, complete migration checklist steps first before expecting route changes to appear live. - ---- - -## 2) Preflight checks (always) - -Run local quality + structure checks before deploy: - -- `npm run check:links` -- `npm run lint` -- `npm run type-check` -- `npm run build` - -If these fail, fix locally before deployment. 
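If you prefer to run the preflight gate as a single command, a small Node/TypeScript wrapper like the sketch below runs the checks in order and stops on the first failure. It assumes the four npm scripts above exist; it is not one of the repository's existing scripts.

```typescript
// Hypothetical preflight runner -- a convenience wrapper, not an existing repo script.
import { execSync } from "node:child_process";

const checks = [
  "npm run check:links",
  "npm run lint",
  "npm run type-check",
  "npm run build",
];

for (const cmd of checks) {
  console.log(`\n==> ${cmd}`);
  try {
    execSync(cmd, { stdio: "inherit" });
  } catch {
    console.error(`Preflight failed at: ${cmd} -- fix locally before deploying.`);
    process.exit(1);
  }
}
console.log("\nAll preflight checks passed.");
```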
- ---- - -## 3) Path A β€” Vercel deployment - -### Required repository secrets (Vercel) - -Set in GitHub Actions secrets: - -- `VERCEL_TOKEN` -- `VERCEL_ORG_ID` -- `VERCEL_PROJECT_ID` - -### Required platform setup - -- Vercel project linked to this repository. -- Domain(s) configured in Vercel dashboard. -- Required environment variables present in Vercel. - -### Trigger deploy - -- Push to `main` **or** run your selected CI deploy workflow. - -### Verify live (Vercel) - -- Visit `https://tradehax.net`. -- Validate expected route removals/additions. -- Confirm deployment status is `Ready` in Vercel. - ---- - -## 4) Path B β€” Namecheap VPS deployment - -### Required repository secrets (Namecheap) - -Set in GitHub Actions secrets: - -- `NAMECHEAP_VPS_HOST` -- `NAMECHEAP_VPS_USER` -- `NAMECHEAP_VPS_SSH_KEY` - -Optional but recommended: - -- `NAMECHEAP_VPS_PORT` -- `NAMECHEAP_APP_ROOT` -- `NAMECHEAP_APP_PORT` - -### Trigger deploy from repo scripts - -- Run: `npm run deploy:launch` - -If deploy fails on `deploy:namecheap:check`, it means required Namecheap secrets are still missing. - -### Verify live (Namecheap) - -- Visit `https://tradehax.net`. -- Validate route behavior (e.g., removed routes return 404/redirect as intended). -- Confirm server logs/health checks for successful rollout. - ---- - -## 5) Definition of "deployed" - -A commit is considered deployed only when **all** are true: - -1. Commit exists on `origin/main`. -2. Selected deploy path completed successfully. -3. Live site behavior matches the commit. - ---- - -## 6) Fast troubleshooting - -- **Code pushed but live unchanged:** deploy path did not run or failed. -- **Namecheap deploy check fails:** missing required Namecheap secrets. -- **Vercel deploy succeeds but old content appears:** check project/domain mapping and cache. -- **Broken routes after cleanup:** run `npm run check:links` and fix stale links. - ---- - -## 7) Recommended operating cadence - -For precise change control: - -1. Make scoped changes. -2. Run local quality checks. -3. Commit and push. -4. Trigger one deploy path. -5. Verify live URLs immediately. 
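The "verify live" steps above can also be scripted as a quick post-deploy smoke check. The sketch below assumes Node 18+ (global `fetch`) and uses `/this-route-was-removed` as a placeholder for whichever routes your release intentionally removed; substitute the real paths for your release.

```typescript
// Hedged sketch: verify the live site responds and removed routes are gone.
const BASE_URL = "https://tradehax.net";

async function smokeCheck(): Promise<void> {
  // 1. The landing page should respond with 200.
  const home = await fetch(BASE_URL, { redirect: "follow" });
  if (!home.ok) throw new Error(`Home page returned ${home.status}`);

  // 2. Routes removed in this release should now 404 (or redirect, per intent).
  //    "/this-route-was-removed" is a placeholder -- substitute real paths.
  const removed = await fetch(`${BASE_URL}/this-route-was-removed`, {
    redirect: "manual",
  });
  if (removed.status === 200) {
    throw new Error("A removed route is still serving content -- check cache/mapping.");
  }

  console.log("Live smoke check passed.");
}

smokeCheck().catch((err) => {
  console.error(err);
  process.exit(1);
});
```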
- ---- - -**Last Updated:** 2026-03-05 -**Repository:** `DarkModder33/main` -**Canonical Production URL:** `https://tradehax.net` diff --git a/DEPLOYMENT_READY.txt b/DEPLOYMENT_READY.txt deleted file mode 100644 index 8f6cabeb..00000000 --- a/DEPLOYMENT_READY.txt +++ /dev/null @@ -1,218 +0,0 @@ -╔══════════════════════════════════════════════════════════════════════════════╗ -β•‘ πŸŽ‰ TRADEHAX KUBERNETES DEPLOYMENT READY πŸŽ‰ β•‘ -β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ βœ… CLUSTER STATUS β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ Kubernetes Version: v1.31.1 β”‚ -β”‚ Control Plane: https://127.0.0.1:56927 βœ… β”‚ -β”‚ Nodes: 1 (desktop-control-plane) - Ready βœ… β”‚ -β”‚ DNS: CoreDNS - Active βœ… β”‚ -β”‚ β”‚ -β”‚ GitLab Agent: β”‚ -β”‚ Status: Connected βœ… β”‚ -β”‚ Replicas: 2/2 Running β”‚ -β”‚ Namespace: gitlab-agent-gitlab1 β”‚ -β”‚ KAS Connection: wss://kas.gitlab.com βœ… β”‚ -β”‚ Agent ID: agentk:3161108 β”‚ -β”‚ Leader Pod: gitlab-agent-v2-7cf65d9858-5n7gq β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ πŸ“¦ DEPLOYMENT MANIFESTS β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ k8s/deployment.yaml (2.6 KB) βœ… TradeHax app pods β”‚ -β”‚ k8s/ingress.yaml (1.4 KB) βœ… DNS routing & TLS β”‚ -β”‚ k8s/nginx-ingress.yaml (6.9 KB) βœ… NGINX Ingress Controller β”‚ -β”‚ β”‚ -β”‚ Total Ready: 3 files, 10.9 KB β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ πŸš€ DEPLOYMENT ARCHITECTURE β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ β”‚ -β”‚ GitHub/GitLab Repository (main branch) β”‚ -β”‚ ↓ β”‚ -β”‚ CI/CD Pipeline (.gitlab-ci.yml) β”‚ -β”‚ β€’ Build Docker image β”‚ -β”‚ β€’ 
Push to GHCR (ghcr.io/darkmodder33/main:latest) β”‚ -β”‚ β€’ Deploy via GitLab Agent β”‚ -β”‚ ↓ β”‚ -β”‚ GitLab Agent (Local Kubernetes) β”‚ -β”‚ β€’ Receives deploy commands β”‚ -β”‚ β€’ Applies kubectl manifests β”‚ -β”‚ ↓ β”‚ -β”‚ NGINX Ingress Controller β”‚ -β”‚ β€’ HTTP/HTTPS termination β”‚ -β”‚ β€’ DNS routing (tradehax.net, tradehaxai.tech) β”‚ -β”‚ β€’ TLS certificates β”‚ -β”‚ ↓ β”‚ -β”‚ TradeHax Service (ClusterIP) β”‚ -β”‚ β€’ Port 80 β†’ Pod 3000 β”‚ -β”‚ ↓ β”‚ -β”‚ TradeHax Deployment (2-5 replicas) β”‚ -β”‚ β€’ Rolling updates (zero downtime) β”‚ -β”‚ β€’ Auto-scaling (HPA) β”‚ -β”‚ β€’ Health checks (liveness & readiness) β”‚ -β”‚ ↓ β”‚ -β”‚ TradeHax Pods (Running) β”‚ -β”‚ β€’ Node.js Next.js application β”‚ -β”‚ β€’ Hugging Face AI integration β”‚ -β”‚ β€’ Trading bot logic β”‚ -β”‚ β€’ Image generation β”‚ -β”‚ β€’ Smart environment β”‚ -β”‚ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ 🎯 ONE-COMMAND DEPLOYMENT β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ β”‚ -β”‚ kubectl apply -f k8s/ β”‚ -β”‚ β”‚ -β”‚ This will deploy: β”‚ -β”‚ β€’ NGINX Ingress Controller (ingress-nginx namespace) β”‚ -β”‚ β€’ TradeHax Deployment (2 replicas, scales to 5) β”‚ -β”‚ β€’ TradeHax Service (internal routing) β”‚ -β”‚ β€’ Ingress (DNS routing + TLS) β”‚ -β”‚ β€’ ConfigMap (environment variables) β”‚ -β”‚ β€’ Resource Quota (safe resource limits) β”‚ -β”‚ β”‚ -β”‚ Total deployment time: ~2-3 minutes β”‚ -β”‚ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ βš™οΈ AUTO-DEPLOY VIA GIT PUSH β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ β”‚ -β”‚ Setup: Already configured in .gitlab-ci.yml β”‚ -β”‚ β”‚ -β”‚ To trigger auto-deployment: β”‚ -β”‚ $ git push origin main β”‚ -β”‚ β”‚ -β”‚ What happens automatically: β”‚ -β”‚ 1. GitLab detects push to main β”‚ -β”‚ 2. CI/CD pipeline triggers β”‚ -β”‚ 3. Docker image built β”‚ -β”‚ 4. Image pushed to GHCR β”‚ -β”‚ 5. GitLab Agent receives deploy command β”‚ -β”‚ 6. kubectl applies new image β”‚ -β”‚ 7. Rolling update (no downtime) β”‚ -β”‚ 8. 
New version live at tradehax.net β”‚ -β”‚ β”‚ -β”‚ Time from push to live: 5-10 minutes β”‚ -β”‚ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ πŸ“Š SCALING & RESOURCE CONFIGURATION β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ β”‚ -β”‚ Replica Scaling: β”‚ -β”‚ β€’ Minimum: 2 pods β”‚ -β”‚ β€’ Maximum: 5 pods β”‚ -β”‚ β€’ Auto-scale trigger: CPU > 70% or Memory > 80% β”‚ -β”‚ β”‚ -β”‚ Per-Pod Resources: β”‚ -β”‚ β€’ CPU Request: 250m (guaranteed) β”‚ -β”‚ β€’ CPU Limit: 500m (max allowed) β”‚ -β”‚ β€’ Memory Request: 512Mi (guaranteed) β”‚ -β”‚ β€’ Memory Limit: 1Gi (max allowed) β”‚ -β”‚ β”‚ -β”‚ Health Checks: β”‚ -β”‚ β€’ Liveness probe: Check every 10s (restart if fails 3x) β”‚ -β”‚ β€’ Readiness probe: Check every 5s (remove from LB if fails 2x) β”‚ -β”‚ β”‚ -β”‚ Performance: β”‚ -β”‚ β€’ Pod startup: ~10-15 seconds β”‚ -β”‚ β€’ Request latency: <200ms (cached) β”‚ -β”‚ β€’ Throughput: 100+ req/s per pod β”‚ -β”‚ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ πŸ” VERIFICATION COMMANDS β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ β”‚ -β”‚ Check Deployment Status: β”‚ -β”‚ $ kubectl get all β”‚ -β”‚ $ kubectl get pods -o wide β”‚ -β”‚ $ kubectl get svc β”‚ -β”‚ $ kubectl get ingress β”‚ -β”‚ β”‚ -β”‚ View Logs: β”‚ -β”‚ $ kubectl logs -f deployment/tradehax-app β”‚ -β”‚ $ kubectl logs β”‚ -β”‚ β”‚ -β”‚ Port Forward (Local Testing): β”‚ -β”‚ $ kubectl port-forward svc/tradehax-service 3000:80 β”‚ -β”‚ $ Open: http://localhost:3000 β”‚ -β”‚ β”‚ -β”‚ Debugging: β”‚ -β”‚ $ kubectl describe pod β”‚ -β”‚ $ kubectl exec -it -- /bin/sh β”‚ -β”‚ $ kubectl get events --sort-by='.lastTimestamp' β”‚ -β”‚ β”‚ -β”‚ Metrics: β”‚ -β”‚ $ kubectl top nodes β”‚ -β”‚ $ kubectl top pods β”‚ -β”‚ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ βœ… PRODUCTION READINESS CHECKLIST β”‚ 
-β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ β”‚ -β”‚ [βœ…] Kubernetes cluster running (v1.31.1) β”‚ -β”‚ [βœ…] GitLab Agent connected (2 replicas) β”‚ -β”‚ [βœ…] Deployment manifests created β”‚ -β”‚ [βœ…] NGINX Ingress configured β”‚ -β”‚ [βœ…] CI/CD pipeline ready (.gitlab-ci.yml) β”‚ -β”‚ [βœ…] Docker image builds successfully β”‚ -β”‚ [βœ…] Container registry access (GHCR) β”‚ -β”‚ [βœ…] Resource limits configured β”‚ -β”‚ [βœ…] Health checks defined β”‚ -β”‚ [βœ…] Auto-scaling configured (HPA) β”‚ -β”‚ [βœ…] Deployment strategy (rolling updates) β”‚ -β”‚ [⏳] Domain DNS configured (Namecheap) β”‚ -β”‚ [⏳] TLS certificates (Let's Encrypt) β”‚ -β”‚ [⏳] Monitoring setup (optional) β”‚ -β”‚ [⏳] Backup strategy (optional) β”‚ -β”‚ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ πŸ“š DOCUMENTATION FILES β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ β”‚ -β”‚ KUBERNETES_READY.md - Current status (this file) β”‚ -β”‚ KUBERNETES_DEPLOYMENT_STATUS.md - Detailed cluster info β”‚ -β”‚ COMPLETE_DEPLOYMENT_GUIDE.md - 6-phase deployment walkthrough β”‚ -β”‚ .gitlab-ci.yml - CI/CD pipeline configuration β”‚ -β”‚ GITLAB_AGENT_DEPLOYMENT.md - Agent setup details β”‚ -β”‚ TRADEHAX_AI_PLATFORM_SUMMARY.md - Feature overview β”‚ -β”‚ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - -╔══════════════════════════════════════════════════════════════════════════════╗ -β•‘ β•‘ -β•‘ 🎯 STATUS: PRODUCTION READY βœ… β•‘ -β•‘ β•‘ -β•‘ Your Kubernetes cluster is fully prepared and connected to GitLab. β•‘ -β•‘ TradeHax AI platform is ready for deployment. 
β•‘ -β•‘ β•‘ -β•‘ Next Step: β•‘ -β•‘ $ kubectl apply -f k8s/ β•‘ -β•‘ β•‘ -β•‘ Or push to main for auto-deploy: β•‘ -β•‘ $ git push origin main β•‘ -β•‘ β•‘ -β•‘ Your app will be live at: β•‘ -β•‘ https://tradehax.net & https://tradehaxai.tech β•‘ -β•‘ β•‘ -β•‘ Expected deployment time: 2-3 minutes (or 5-10 min with CI/CD) β•‘ -β•‘ β•‘ -β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• diff --git a/DIGITAL_EMPIRE_STRATEGY.md b/DIGITAL_EMPIRE_STRATEGY.md deleted file mode 100644 index d8010759..00000000 --- a/DIGITAL_EMPIRE_STRATEGY.md +++ /dev/null @@ -1,225 +0,0 @@ -# TradeHax Digital Empire Strategy 2026 - -## 🎯 Mission: Unified AI Agent Deployment Platform - -**Current State:** Three disconnected services (trading, music, services) -**Target State:** Interconnected ecosystem where every tool creates network effects - ---- - -## Phase 1: UX Clarity (0-4 weeks) - -### 1. Single Source of Truth Dashboard - -**Problem:** Users don't know where to start -**Solution:** Central dashboard showing: - -- All 3 services in one view with clear value statements -- "Your Profile" showing earned credits across platform -- Smart recommendations based on usage patterns -- Quick-start workflows for each service - -**Impact:** 40% increase in cross-service engagement - -### 2. Hero Value Proposition β€” Instant Clarity - -**Problem:** Landing page says "three precision environments" but doesn't explain *for whom* - -**New Hero Section:** - -``` -TradeHax: Multiply your edge with AI - -πŸ€– For traders: AI signals that work. Deploy real trades on autopilot. -🎸 For creators: AI guitar coach & promotion engine. Ship music 100x faster. -⚑ For builders: AI agents as a service. Launch in days, not months. - -No coding required. No setup required. Get your first signal in 60 seconds. -``` - -**Impact:** Instant understanding + 30% CTA lift - -### 3. Onboarding Redesign (Gamified + Progressive) - -**Current:** "VIEW-ONLY mode" β€” boring -**New:** "Achievement Unlocked" system - -Phase 1 (2 min): "Discover" - Try paper trading on 1 pair -Phase 2 (5 min): "Analyze" - Run one AI scan -Phase 3 (10 min): "Create" - Generate 1 music idea OR service -Phase 4 (30 sec): "Connect" - Link wallet for real execution - -**Rewards:** - -- Badge for completing each phase -- "$100 free credits" after Phase 1 (play money) -- Discord role unlocked after Phase 2 -- Referral link shareable after Phase 3 - -**Impact:** 5x completion rate, instant Discord integration - ---- - -## Phase 2: Network Effects (4-8 weeks) - -### 4. Community Hub & Leaderboards - -**Unified leaderboards across all services:** - -- Trading: Real P&L (anonymized) -- Music: Weekly charts, listens, shares -- Services: Completed projects, client ratings - -**Revenue Driver:** Premium leaderboard (featured rank = $9/mo) - -### 5. Creator Economy / Agent Marketplace - -**Let users monetize:** - -- Share trading strategies β†’ earn % of user profits -- Share music templates β†’ $$ per download -- Share service blueprints β†’ $$ per deployment - -**Technical:** New `/agents/marketplace` route + Stripe integration - -### 6. 
Referral Flywheel - -**"Invite 3 friends, unlock premium"** - -- Each invite = 1 credit toward premium tier -- Friend gets 1000 free credits on signup -- Viral loop: free users β†’ invite β†’ become paid - ---- - -## Phase 3: Integration Layer (8-12 weeks) - -### 7. Discord Bot Native Integration - -**In any Discord:** - -``` -@tradehax scan AAPL -@tradehax generate "upbeat summer track" -@tradehax recommend-service -``` - -**Revenue:** Freemium bot, $5/mo for guild features - -### 8. Telegram Mini App - -**Lightweight mobile access:** - -- Paper trades in 2 taps -- Music ideas in chat -- Service discovery - -### 9. X (Twitter) Integration - -**Auto-post results:** - -- Trading gains β†’ "I made $X with @tradehax" -- Music β†’ "New track: [link]" -- Built-in viral mechanism - ---- - -## Phase 4: Monetization Clarity (Parallel) - -### 10. Transparent Pricing Ladder - -| Tier | Price | Includes | -|------|-------|----------| -| **Free** | $0 | Paper trading, 1 scan/day, 1 music gen/day | -| **Pro** | $9/mo | Unlimited scans, live trading, priority signals | -| **Builder** | $29/mo | Agent marketplace, service blueprints, API access | -| **Council** | $99/mo | All + 1:1 consulting, custom model, white-label | - -**Key:** Show *exactly* what each tier unlocks. No surprise paywalls. - -### 11. Learning Center (Freeβ†’Paid) - -**Drive recurring engagement + premium conversions:** - -- "Trading 101" (free, 5 videos) -- "Advanced: Momentum Strategies" (free, 5 videos) -- "Mastery: Build Your Own Signal" ($49, cert + agent template) - -**Revenue:** 10% of Pro tier upgrade directly attributable to Mastery cert - ---- - -## Implementation Roadmap - -### Week 1-2: Dashboard + Hero Clarity - -- [ ] Create `(layouts)/dashboard` with unified view -- [ ] Redesign hero section in `app/page.tsx` -- [ ] Add service cards with clear CTAs - -### Week 3-4: Onboarding Gamification - -- [ ] Build achievement system (`lib/achievements`) -- [ ] Create phase-gated modals -- [ ] Integrate Discord bot invite in UX - -### Week 5-6: Community Hub - -- [ ] Create `/leaderboards` route -- [ ] Add real-time P&L sync -- [ ] Premium leaderboard component - -### Week 7-8: Agent Marketplace - -- [ ] Build marketplace UI (`/agents/marketplace`) -- [ ] Integrate Stripe for revenue share -- [ ] Template system for strategies/blueprints - -### Week 9-10: Discord Bot - -- [ ] Publish Discord bot to app directory -- [ ] Build command handlers (scan, generate, recommend) -- [ ] In-bot signup flow - -### Week 11-12: Learning Center + Pricing - -- [ ] Video course builder component -- [ ] Pricing page redesign -- [ ] Certification workflow - ---- - -## Success Metrics (12 months in) - -| Metric | Target | Current Est. | -|--------|--------|-------------| -| Monthly Active Users | 50K | ~500 | -| % Cross-service usage | 35% | ~5% | -| Premium tier conversion | 8% | ~2% | -| Viral coefficient | 1.5+ | ~0.3 | -| Marketplace revenue share | $50K/mo | $0 | -| Discord bot invites | 10K+ guilds | 0 | -| NPS score | 60+ | ~40 | - ---- - -## Why This Works - -1. **Clarity First:** Users instantly understand what TradeHax is (not 3 random tools) -2. **Network Effects:** Leaderboards + referrals create compounding growth -3. **Multiple Revenue Streams:** Subscription + affiliate + marketplace share -4. **Mobile/Social-Ready:** Telegram + Discord + X distribution (not just web) -5. **Learning as Product:** Users become creators β†’ pay for monetization tools -6. 
**Creator Economy:** Network effects through agent marketplace - ---- - -## Quick Wins (Start This Week) - -Priority order by ROI: - -1. **Dashboard Hub** - Consolidates confusion (-60% support questions) -2. **Hero Clarity** - Increases CTR by 30% immediately -3. **Onboarding Gamification** - 5x completion rate -4. **Leaderboards** - Drives daily return visits (+40% DAU) -5. **Discord Bot** - Viral distribution channel (10K new users/month potential) diff --git a/DISCORD_APP_SETUP.md b/DISCORD_APP_SETUP.md deleted file mode 100644 index 04efb1cb..00000000 --- a/DISCORD_APP_SETUP.md +++ /dev/null @@ -1,65 +0,0 @@ -# Discord App Setup for TradeHax Apps - -This project now includes a verified Discord interactions endpoint and slash command handlers. - -## Included code - -- `app/api/interactions/route.ts` - - Handles Discord interaction webhooks - - Verifies Ed25519 signatures using `DISCORD_PUBLIC_KEY` - - Supports command + ping interaction responses -- `lib/discord/interactions.ts` - - Signature verification helper - - Command handler implementations -- `scripts/discord/register-commands.mjs` - - Registers slash commands to Discord (global or guild scope) - -## Commands included - -- `/app-help` -- `/app-status` -- `/open area:` -- `/trade-start market: symbol:` -- `/market-status` -- `/flow-latest symbol: limit:` -- `/darkpool-latest symbol: limit:` -- `/stock-news symbol: impact: limit:` -- `/top-unusual source: symbol: limit: page:` -- `/news-brief focus: theme: symbol: limit:` -- `/alerts-latest limit:` -- `/quick-stats` - -Most live commands now respond with Discord embeds + link buttons for easier scanning and one-click navigation back into the app. - -Examples: - -- `/top-unusual source:all page:2 limit:8` -- `/news-brief focus:stocks theme:semis limit:6` -- `/news-brief focus:stocks theme:ai symbol:NVDA` - -## Environment variables required - -- `DISCORD_BOT_TOKEN` -- `DISCORD_APPLICATION_ID` -- `DISCORD_PUBLIC_KEY` -- `DISCORD_INTERACTIONS_ENDPOINT_URL` (Discord Developer Portal URL) -- Optional: `DISCORD_GUILD_ID` (for faster guild-scope command iteration) - -## Setup steps - -1. In Discord Developer Portal, set Interactions Endpoint URL to: - - `https://your-domain.com/api/interactions` -2. Ensure env vars above are configured in your deployment. -3. Register commands: - - `npm run discord:register-commands` -4. Test in Discord: - - `/app-help` - - `/trade-start market:crypto symbol:SOL` - -## Notes - -- Global command propagation may take time. -- Guild-scoped commands update quickly for development. -- If Discord says endpoint verification failed, check `DISCORD_PUBLIC_KEY` and deployed endpoint URL. -- Live command depth depends on configured upstream providers (`UNUSUALWHALES_API_KEY`, `POLYGON_API_KEY`, etc.). -- Without vendor keys, commands still work against the deterministic TradeHax simulated intelligence feed. 
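For reference, the signature verification that `lib/discord/interactions.ts` performs follows the standard Ed25519 pattern sketched below. This example uses `tweetnacl` for illustration; the exact library and helper names in the repository may differ.

```typescript
// Minimal sketch of Discord interaction verification (assumes `tweetnacl` is installed).
import nacl from "tweetnacl";

export function isValidDiscordRequest(
  publicKey: string, // DISCORD_PUBLIC_KEY from the Developer Portal
  signature: string, // X-Signature-Ed25519 header
  timestamp: string, // X-Signature-Timestamp header
  rawBody: string    // unparsed request body
): boolean {
  return nacl.sign.detached.verify(
    Buffer.from(timestamp + rawBody),
    Buffer.from(signature, "hex"),
    Buffer.from(publicKey, "hex")
  );
}

// In a route handler: reject with 401 if invalid, answer PING (type 1) with PONG (type 1),
// and dispatch slash commands such as /app-help for type 2 interactions.
export function handleInteraction(body: { type: number }) {
  if (body.type === 1) {
    return { type: 1 }; // PONG
  }
  return { type: 4, data: { content: "Unhandled interaction" } };
}
```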
diff --git a/DNS_COMPARISON_TABLE.md b/DNS_COMPARISON_TABLE.md deleted file mode 100644 index 3e2e671b..00000000 --- a/DNS_COMPARISON_TABLE.md +++ /dev/null @@ -1,235 +0,0 @@ -# DNS Records: Current vs Correct Configuration - -## Quick Visual Comparison for tradehaxai.tech - ---- - -## ❌ CURRENT (What You Have Now) - -| Type | Host | Value | Status | -|------|------|-------|--------| -| A Record | `@` | `76.76.21.21` | βœ… **CORRECT** | -| TXT Record | `_vercel` | `cname.vercel-dns.com.` | ❌ **WRONG** | -| TXT Record | `@` | `v=spf1 include:spf.efwd.registrar-servers.com ~all` | βœ… **CORRECT** (Email) | -| CNAME Record | `www` | *(missing)* | ⚠️ **MISSING** | - -### The Problem -The `_vercel` TXT record contains a **CNAME value** when it should contain a **verification string**. - -Think of it like this: -- ❌ Using a website URL as a password β†’ Won't work! -- βœ… Using a password as a password β†’ Works! - ---- - -## βœ… CORRECT (What You Need) - -| Type | Host | Value | Action Required | -|------|------|-------|----------------| -| A Record | `@` | `76.76.21.21` | βœ… Keep as-is | -| TXT Record | `_vercel` | `vc-domain-verify=tradehaxai.tech,9b1517380c738599577c` | πŸ”§ **REPLACE** | -| TXT Record | `@` | `v=spf1 include:spf.efwd.registrar-servers.com ~all` | βœ… Keep as-is | -| CNAME Record | `www` | `cname.vercel-dns.com.` | βž• **ADD** | - ---- - -## πŸ”„ Side-by-Side Comparison - -### _vercel TXT Record - -| Current (Wrong) ❌ | Correct βœ… | -|-------------------|-----------| -| Type: TXT | Type: TXT | -| Host: `_vercel` | Host: `_vercel` | -| **Value: `cname.vercel-dns.com.`** | **Value: `vc-domain-verify=tradehaxai.tech,XXXXX`** | -| ❌ This is a CNAME domain | βœ… This is a verification string | -| ❌ Wrong format for TXT | βœ… Correct format for TXT | -| ❌ Vercel can't verify | βœ… Vercel can verify | - ---- - -## πŸ“ Understanding the Record Types - -### A Record (Routing - Points to Server) -``` -Purpose: Routes traffic to a server IP address -Example: @ β†’ 76.76.21.21 -Function: "Send visitors here" -Your Status: βœ… Correct -``` - -### TXT Record (Verification - Proves Ownership) -``` -Purpose: Proves you own the domain -Example: _vercel β†’ vc-domain-verify=tradehaxai.tech,9b15... -Function: "Here's my proof of ownership" -Your Status: ❌ Wrong (has a domain instead of proof) -``` - -### CNAME Record (Alias - Points to Another Domain) -``` -Purpose: Creates an alias to another domain -Example: www β†’ cname.vercel-dns.com. -Function: "This subdomain is an alias" -Your Status: ⚠️ Missing -``` - ---- - -## 🎯 What to Change (Step-by-Step) - -### Step 1: Fix the _vercel TXT Record (CRITICAL) - -**Delete This:** -``` -Type: TXT Record -Host: _vercel -Value: cname.vercel-dns.com. ← DELETE THIS -``` - -**Add This:** -``` -Type: TXT Record -Host: _vercel -Value: vc-domain-verify=tradehaxai.tech,XXXXXXXXXXXXX ← ADD THIS -``` - -**How to Get the Correct Value:** -1. Go to https://vercel.com/dashboard -2. Your Project β†’ Settings β†’ Domains -3. Click "Add Domain" -4. Enter: `tradehaxai.tech` -5. Copy the verification string Vercel shows you -6. Use that as the value - ---- - -### Step 2: Add www CNAME Record (Recommended) - -**Add This:** -``` -Type: CNAME Record -Host: www -Value: cname.vercel-dns.com. 
-TTL: Automatic -``` - ---- - -## πŸ”§ Configuration Checklist - -- [ ] **Delete** wrong TXT record - - Host: `_vercel` - - Current value: `cname.vercel-dns.com.` - -- [ ] **Get** verification string from Vercel Dashboard - - Format: `vc-domain-verify=tradehaxai.tech,XXXXX` - -- [ ] **Add** correct TXT record - - Type: TXT - - Host: `_vercel` - - Value: (paste verification string from Vercel) - -- [ ] **Add** CNAME record for www - - Type: CNAME - - Host: `www` - - Value: `cname.vercel-dns.com.` - -- [ ] **Wait** 15-30 minutes for DNS propagation - -- [ ] **Add** domain in Vercel Dashboard - - Enter: `tradehaxai.tech` - - Also add: `www.tradehaxai.tech` - -- [ ] **Verify** domain shows "Valid Configuration" - -- [ ] **Test** site loads: https://tradehaxai.tech - ---- - -## ⚑ Quick Commands to Check DNS - -```bash -# Check if you have dig installed -which dig - -# Check A record -dig tradehaxai.tech A +short - -# Check TXT record -dig _vercel.tradehaxai.tech TXT +short - -# Check CNAME record -dig www.tradehaxai.tech CNAME +short - -# Run automated checker -npm run check:dns -``` - ---- - -## πŸ“Š Expected Results After Fix - -### Using dnschecker.org - -**Check these URLs after making changes:** - -1. **A Record**: https://dnschecker.org/?domain=tradehaxai.tech&type=A - - Should show: `76.76.21.21` globally - -2. **TXT Record**: https://dnschecker.org/?domain=_vercel.tradehaxai.tech&type=TXT - - Should show: `vc-domain-verify=tradehaxai.tech,XXXXX` globally - -3. **CNAME Record**: https://dnschecker.org/?domain=www.tradehaxai.tech&type=CNAME - - Should show: `cname.vercel-dns.com.` globally - ---- - -## ❓ Why Was This Wrong? - -### Common Confusion - -**CNAME vs TXT Records:** - -| CNAME Record | TXT Record | -|--------------|------------| -| Points to a domain | Stores text data | -| Used for aliasing | Used for verification | -| Example: `www` β†’ `cname.vercel-dns.com.` | Example: `_vercel` β†’ `vc-domain-verify=...` | -| Like a forwarding address | Like a security badge | - -**What Happened:** -Someone saw "cname.vercel-dns.com" in Vercel docs and used it as the value for the TXT record, but: -- `cname.vercel-dns.com` is for **CNAME records** (like `www`) -- `vc-domain-verify=...` is for **TXT records** (like `_vercel`) - ---- - -## ⏱️ Timeline - -| Time | What Happens | -|------|-------------| -| **0 min** | You make DNS changes in Namecheap | -| **5 min** | DNS changes propagate to nearest servers | -| **15-30 min** | DNS changes propagate globally | -| **30 min** | Add domain in Vercel Dashboard | -| **31 min** | Vercel verifies domain ownership | -| **35 min** | Vercel issues SSL certificate | -| **40 min** | Site is live at https://tradehaxai.tech πŸŽ‰ | - ---- - -## πŸ“š Full Documentation - -For more details, see: - -- **Quick Fix**: [DNS_QUICK_FIX.md](./DNS_QUICK_FIX.md) -- **Detailed Analysis**: [DNS_INSPECTION_REPORT.md](./DNS_INSPECTION_REPORT.md) -- **Setup Guide**: [VERCEL_DOMAIN_SETUP.md](./VERCEL_DOMAIN_SETUP.md) -- **Summary**: [DNS_CONFIGURATION_SUMMARY.md](./DNS_CONFIGURATION_SUMMARY.md) - ---- - -**Last Updated**: 2026-02-08 -**Priority**: 🚨 HIGH - Site won't work without fixing TXT record -**Time to Fix**: ⏱️ 5 minutes + 30 minutes DNS propagation diff --git a/DNS_CONFIGURATION_SUMMARY.md b/DNS_CONFIGURATION_SUMMARY.md deleted file mode 100644 index 3b052ce1..00000000 --- a/DNS_CONFIGURATION_SUMMARY.md +++ /dev/null @@ -1,271 +0,0 @@ -# DNS Configuration Summary for tradehaxai.tech - -**Last Updated**: 2026-02-08 -**Domain**: tradehaxai.tech -**Status**: ⚠️ Action 
Required - ---- - -## πŸ“‹ What Was Inspected - -Based on the DNS records provided from Namecheap, we've conducted a comprehensive analysis of your domain configuration for Vercel deployment. - ---- - -## πŸ” Key Findings - -### βœ… What's Working -1. **A Record** correctly points to Vercel IP: `76.76.21.21` -2. **SPF Record** correctly configured for email forwarding -3. **Repository configuration** is production-ready for Vercel -4. **Security headers** properly configured in `vercel.json` -5. **Domain allowlisting** includes `tradehaxai.tech` in CSP and Next.js config - -### ❌ Critical Issue Found -**TXT Record for `_vercel` is INCORRECT** - -**Current (Wrong)**: -``` -Type: TXT -Host: _vercel -Value: cname.vercel-dns.com. ❌ -``` - -**Should Be**: -``` -Type: TXT -Host: _vercel -Value: vc-domain-verify=tradehaxai.tech,XXXXXXXXXXXXX βœ… -``` - -**Why This Matters**: -- Vercel uses this TXT record to verify you own the domain -- Without correct verification, Vercel will reject the domain -- Your site will not be accessible via tradehaxai.tech -- SSL certificates cannot be provisioned - -### ⚠️ Missing (Recommended) -**CNAME Record for WWW subdomain** - -**Should Add**: -``` -Type: CNAME -Host: www -Value: cname.vercel-dns.com. -``` - -**Why This Matters**: -- Users typing `www.tradehaxai.tech` will get errors -- Best practice is to support both apex and www -- Vercel can automatically redirect www to apex - ---- - -## πŸ“š Documentation Created - -We've created comprehensive documentation to help you fix these issues: - -### 1. DNS_QUICK_FIX.md -**Start here** for immediate action. Shows exactly what to change in 5 minutes. -- ⏱️ Read time: 2 minutes -- 🎯 Purpose: Quick fix for critical issue -- πŸ“ Location: [DNS_QUICK_FIX.md](./DNS_QUICK_FIX.md) - -### 2. DNS_INSPECTION_REPORT.md -Detailed 350+ line analysis of your DNS configuration. -- ⏱️ Read time: 10 minutes -- 🎯 Purpose: Understand the problem in depth -- πŸ“ Location: [DNS_INSPECTION_REPORT.md](./DNS_INSPECTION_REPORT.md) -- πŸ“– Includes: - - Line-by-line DNS record analysis - - Step-by-step fix instructions - - Troubleshooting guide - - Timeline expectations - - Verification checklist - -### 3. VERCEL_DOMAIN_SETUP.md (Updated) -Complete domain setup guide with new warnings. -- ⏱️ Read time: 15 minutes -- 🎯 Purpose: Complete domain configuration -- πŸ“ Location: [VERCEL_DOMAIN_SETUP.md](./VERCEL_DOMAIN_SETUP.md) -- ✨ Updates: - - Added critical warning about TXT record mistake - - Step-by-step guide to get verification string - - Reference to DNS inspection report - -### 4. README.md (Updated) -Added DNS documentation to deployment issues section. -- πŸ“ Location: [README.md](./README.md) -- ✨ New section at top with DNS links - ---- - -## 🎯 Action Plan - -Follow this sequence: - -### Immediate (5 minutes) -1. Read [DNS_QUICK_FIX.md](./DNS_QUICK_FIX.md) -2. Log into Namecheap Advanced DNS -3. Delete wrong `_vercel` TXT record -4. Get verification string from Vercel Dashboard -5. Add correct `_vercel` TXT record -6. Add `www` CNAME record (optional but recommended) - -### Wait (15-30 minutes) -- DNS propagation takes time -- Check progress at https://dnschecker.org -- Search for `_vercel.tradehaxai.tech` to verify TXT record -- Search for `tradehaxai.tech` to verify A record - -### Complete Setup (10 minutes) -1. Go to Vercel Dashboard β†’ Settings β†’ Domains -2. Add `tradehaxai.tech` -3. Add `www.tradehaxai.tech` (if you added CNAME) -4. Wait for "Valid Configuration" status -5. 
Wait for SSL certificate provisioning (automatic) -6. Test: Visit https://tradehaxai.tech - ---- - -## πŸ“Š Timeline - -| Phase | Duration | What Happens | -|-------|----------|--------------| -| DNS Changes | 5 mins | You make changes in Namecheap | -| DNS Propagation | 15-30 mins | Changes spread to DNS servers | -| Domain Verification | 1-5 mins | Vercel verifies ownership | -| SSL Provisioning | 5-15 mins | Vercel issues SSL certificate | -| **Total** | **~30-60 mins** | **Site fully live** | - ---- - -## βœ… Verification Checklist - -After making changes, verify each step: - -### DNS Configuration -- [ ] `_vercel` TXT record deleted (old value: `cname.vercel-dns.com.`) -- [ ] `_vercel` TXT record added (new value: `vc-domain-verify=...`) -- [ ] `www` CNAME record added (value: `cname.vercel-dns.com.`) -- [ ] Changes saved in Namecheap (clicked checkmark) - -### DNS Propagation -- [ ] Checked https://dnschecker.org for `_vercel.tradehaxai.tech` -- [ ] TXT record shows verification string globally -- [ ] Checked https://dnschecker.org for `tradehaxai.tech` -- [ ] A record shows `76.76.21.21` globally - -### Vercel Configuration -- [ ] Added `tradehaxai.tech` in Vercel Dashboard -- [ ] Added `www.tradehaxai.tech` in Vercel Dashboard (optional) -- [ ] Domain shows "Valid Configuration" status -- [ ] SSL certificate shows as "Active" - -### Site Accessibility -- [ ] https://tradehaxai.tech loads successfully -- [ ] https://www.tradehaxai.tech loads successfully (if configured) -- [ ] Browser shows secure padlock (SSL working) -- [ ] No mixed content warnings in console -- [ ] Site loads correctly on mobile devices - ---- - -## πŸ› οΈ No Code Changes Needed - -Your repository is **already correctly configured**: - -- βœ… `vercel.json` - Properly configured for Next.js deployment -- βœ… `next.config.ts` - Conditional export for dual deployment -- βœ… `.github/workflows/vercel-deploy.yml` - Automated deployment -- βœ… Environment variables - Domain included in CSP and allowlists -- βœ… Image optimization - Configured for tradehaxai.tech - -**All fixes are DNS-only** - no code deployment needed! - ---- - -## πŸ“– Understanding the Mistake - -### Why Was the TXT Record Wrong? - -**Common Confusion**: -- CNAME records use domain values like `cname.vercel-dns.com` -- TXT records for verification use strings like `vc-domain-verify=...` -- Both are related to Vercel, but serve different purposes - -**Analogy**: -| Record Type | Like... | Example Value | -|-------------|---------|---------------| -| A Record | Street Address | `76.76.21.21` | -| CNAME | Forwarding Address | `cname.vercel-dns.com` | -| TXT Verification | Security Badge Number | `vc-domain-verify=tradehaxai.tech,9b15...` | - -You wouldn't use a forwarding address as a security badge - similarly, you can't use a CNAME value in a TXT verification record. - ---- - -## πŸŽ“ Lessons Learned - -1. **TXT vs CNAME**: Different record types for different purposes -2. **Verification β‰  Routing**: Verification proves ownership, routing directs traffic -3. **Get from Source**: Always get verification strings from Vercel Dashboard -4. **Check Propagation**: Use tools like dnschecker.org to verify changes -5. **Be Patient**: DNS takes time, don't panic if not instant - ---- - -## πŸ†˜ Need Help? - -### If DNS Changes Don't Work -1. **Wait longer** - Full propagation can take 24-48 hours -2. **Check for typos** - Verification string must be exact -3. **Verify saved** - Ensure you clicked save in Namecheap -4. 
**Check nameservers** - Should point to Namecheap, not Vercel - -### If Vercel Won't Verify -1. **Wait for propagation** - Check dnschecker.org -2. **Try again** - Remove and re-add domain in Vercel -3. **Contact Vercel** - https://vercel.com/support -4. **Check examples** - See [DNS_INSPECTION_REPORT.md](./DNS_INSPECTION_REPORT.md) - ---- - -## πŸ“ž Support Resources - -- **Quick Fix**: [DNS_QUICK_FIX.md](./DNS_QUICK_FIX.md) -- **Detailed Analysis**: [DNS_INSPECTION_REPORT.md](./DNS_INSPECTION_REPORT.md) -- **Setup Guide**: [VERCEL_DOMAIN_SETUP.md](./VERCEL_DOMAIN_SETUP.md) -- **DNS Checker**: https://dnschecker.org -- **Vercel Docs**: https://vercel.com/docs/concepts/projects/domains -- **Vercel Support**: https://vercel.com/support - ---- - -## πŸŽ‰ After Success - -Once your site is live: - -1. **Update Environment Variables** (if needed) - - Set `NEXT_PUBLIC_SITE_URL=https://tradehaxai.tech` - - Redeploy to apply changes - -2. **Test All Features** - - Navigation works - - Web3 wallet connects - - Forms submit correctly - - Analytics tracking - -3. **Monitor Performance** - - Check Vercel Analytics - - Monitor error logs - - Test on different devices - -4. **Share Your Site** - - Your site is live at https://tradehaxai.tech! πŸŽ‰ - ---- - -**Status**: πŸ“ Documentation Complete -**Next Step**: πŸš€ [Follow DNS_QUICK_FIX.md](./DNS_QUICK_FIX.md) to fix DNS -**ETA to Live**: ⏱️ ~30-60 minutes after DNS changes diff --git a/DNS_INDEX.md b/DNS_INDEX.md deleted file mode 100644 index 93ce8826..00000000 --- a/DNS_INDEX.md +++ /dev/null @@ -1,277 +0,0 @@ -# DNS Documentation Index - -**Welcome!** This directory contains comprehensive DNS configuration documentation for deploying `tradehaxai.tech` on Vercel. - ---- - -## 🚨 START HERE - -If you just want to fix your DNS and get your site live: - -πŸ‘‰ **[DNS_QUICK_FIX.md](./DNS_QUICK_FIX.md)** - 5-minute fix guide - ---- - -## πŸ“ Documentation Overview - -### Quick Reference (5 minutes) -- **[DNS_QUICK_FIX.md](./DNS_QUICK_FIX.md)** - - Immediate action guide - - Shows exactly what's wrong and how to fix it - - No technical background needed - - Best for: "Just tell me what to do!" 
- -- **[DNS_COMPARISON_TABLE.md](./DNS_COMPARISON_TABLE.md)** - - Visual side-by-side comparison - - Current vs Correct configuration - - Color-coded for easy understanding - - Best for: "Show me what's different" - -### Detailed Analysis (10-15 minutes) -- **[DNS_INSPECTION_REPORT.md](./DNS_INSPECTION_REPORT.md)** - - Comprehensive 350+ line analysis - - Line-by-line DNS record evaluation - - Detailed troubleshooting guide - - Timeline expectations - - Complete verification checklist - - Best for: "I want to understand everything" - -- **[DNS_CONFIGURATION_SUMMARY.md](./DNS_CONFIGURATION_SUMMARY.md)** - - Executive summary of findings - - Links to all documentation - - Action plan with timeline - - Verification checklist - - Repository configuration status - - Best for: "Give me the big picture" - -### Setup Guides (15-20 minutes) -- **[VERCEL_DOMAIN_SETUP.md](./VERCEL_DOMAIN_SETUP.md)** - - Complete domain configuration guide - - Step-by-step instructions - - Updated with critical warnings - - Troubleshooting section - - Security best practices - - Best for: "Walk me through the entire setup" - ---- - -## πŸ› οΈ Tools & Scripts - -### Automated DNS Checker -```bash -npm run check:dns -``` - -This command runs `scripts/check-dns-config.sh` which: -- βœ… Checks your A record (apex domain) -- βœ… Checks your _vercel TXT record (verification) -- βœ… Checks your www CNAME record (subdomain) -- βœ… Provides actionable feedback -- βœ… Links to documentation - -**Usage:** -```bash -cd /home/runner/work/main/main -npm run check:dns -``` - -### Vercel Configuration Checker -```bash -npm run check:vercel -``` - -This command runs `scripts/check-vercel-config.sh` which: -- βœ… Verifies repository structure -- βœ… Checks Next.js configuration -- βœ… Validates Vercel configuration -- βœ… Checks Git branch setup -- βœ… Verifies workflows - ---- - -## 🎯 Choose Your Path - -### Path 1: "I just want it fixed NOW" -1. Read [DNS_QUICK_FIX.md](./DNS_QUICK_FIX.md) (2 min) -2. Make DNS changes in Namecheap (5 min) -3. Wait for propagation (15-30 min) -4. Add domain in Vercel Dashboard (2 min) -5. βœ… Done! - -### Path 2: "I want to understand what's wrong" -1. Read [DNS_COMPARISON_TABLE.md](./DNS_COMPARISON_TABLE.md) (5 min) -2. Read [DNS_INSPECTION_REPORT.md](./DNS_INSPECTION_REPORT.md) (10 min) -3. Follow step-by-step instructions (5 min) -4. Wait for propagation (15-30 min) -5. Verify with checklist (5 min) -6. βœ… Done! - -### Path 3: "I want the complete setup guide" -1. Read [VERCEL_DOMAIN_SETUP.md](./VERCEL_DOMAIN_SETUP.md) (15 min) -2. Complete all prerequisites -3. Follow each step carefully -4. Use troubleshooting section if needed -5. βœ… Done! - -### Path 4: "I'm a developer, show me everything" -1. Read [DNS_CONFIGURATION_SUMMARY.md](./DNS_CONFIGURATION_SUMMARY.md) (5 min) -2. Read [DNS_INSPECTION_REPORT.md](./DNS_INSPECTION_REPORT.md) (10 min) -3. Run `npm run check:dns` to verify current state -4. Make DNS changes -5. Run `npm run check:vercel` to verify repo config -6. βœ… Done! - ---- - -## πŸ” The Problem (TL;DR) - -Your `_vercel` TXT record has the wrong value: - -❌ **Current**: `cname.vercel-dns.com.` (This is a CNAME value) -βœ… **Should be**: `vc-domain-verify=tradehaxai.tech,XXXXX` (This is a verification string) - -**Why it matters**: Vercel uses this TXT record to verify you own the domain. Without the correct verification string, Vercel won't allow the domain to be configured. - ---- - -## βœ… The Solution (TL;DR) - -1. **Delete** the wrong `_vercel` TXT record -2. 
**Get** verification string from Vercel Dashboard -3. **Add** correct `_vercel` TXT record with verification string -4. **Add** `www` CNAME record (optional but recommended) -5. **Wait** 15-30 minutes for DNS to propagate -6. **Add** domain in Vercel Dashboard -7. **Test** your site at https://tradehaxai.tech - ---- - -## πŸ“Š Documentation Stats - -| Document | Size | Read Time | Purpose | -|----------|------|-----------|---------| -| DNS_QUICK_FIX.md | 2.6 KB | 2 min | Quick fix instructions | -| DNS_COMPARISON_TABLE.md | 6.1 KB | 5 min | Visual comparison | -| DNS_INSPECTION_REPORT.md | 11.6 KB | 10 min | Detailed analysis | -| DNS_CONFIGURATION_SUMMARY.md | 8.3 KB | 8 min | Executive summary | -| VERCEL_DOMAIN_SETUP.md | 9.2 KB | 15 min | Complete setup guide | -| **Total** | **37.8 KB** | **40 min** | **Everything you need** | - ---- - -## πŸŽ“ Key Concepts - -### DNS Record Types - -| Type | Purpose | Your Status | -|------|---------|-------------| -| **A Record** | Points domain to IP address | βœ… Correct | -| **TXT Record** | Stores verification strings | ❌ Wrong | -| **CNAME Record** | Creates domain alias | ⚠️ Missing | - -### Why TXT β‰  CNAME - -- **TXT records** store text (like verification codes, SPF records) -- **CNAME records** point to domains (like aliases, redirects) -- **You can't mix them** - they serve different purposes - -### Verification String Format - -βœ… **Correct format**: `vc-domain-verify=tradehaxai.tech,9b1517380c738599577c` -❌ **Wrong format**: `cname.vercel-dns.com.` - ---- - -## ⏱️ Timeline - -| Phase | Duration | What Happens | -|-------|----------|--------------| -| DNS Changes | 5 min | You update records in Namecheap | -| DNS Propagation | 15-30 min | Changes spread to DNS servers worldwide | -| Domain Verification | 1-5 min | Vercel verifies ownership via TXT record | -| SSL Provisioning | 5-15 min | Vercel automatically issues SSL certificate | -| **Total** | **~30-60 min** | **Your site is live!** πŸŽ‰ | - ---- - -## πŸ†˜ Need Help? 
- -### Tools -- **DNS Propagation Checker**: https://dnschecker.org -- **Vercel Dashboard**: https://vercel.com/dashboard -- **Vercel Docs**: https://vercel.com/docs/concepts/projects/domains - -### Commands -```bash -# Check DNS configuration -npm run check:dns - -# Check Vercel configuration -npm run check:vercel - -# Manual DNS checks (if you have dig) -dig tradehaxai.tech A +short -dig _vercel.tradehaxai.tech TXT +short -dig www.tradehaxai.tech CNAME +short -``` - -### Support -- **Vercel Support**: https://vercel.com/support -- **Namecheap Support**: https://www.namecheap.com/support/ - ---- - -## πŸ“ Related Documentation - -### Deployment Guides -- [DEPLOYMENT_FIX_CHECKLIST.md](./DEPLOYMENT_FIX_CHECKLIST.md) -- [DEPLOYMENT_FIX_SUMMARY.md](./DEPLOYMENT_FIX_SUMMARY.md) -- [DEPLOYMENT_QUICKSTART.md](./DEPLOYMENT_QUICKSTART.md) - -### Vercel Specific -- [VERCEL_BRANCH_FIX.md](./VERCEL_BRANCH_FIX.md) -- [VERCEL_STATIC_EXPORT_FIX.md](./VERCEL_STATIC_EXPORT_FIX.md) -- [VERCEL_DEPLOYMENT_TROUBLESHOOTING.md](./VERCEL_DEPLOYMENT_TROUBLESHOOTING.md) - -### Configuration -- [GITHUB_SECRETS_SETUP.md](./GITHUB_SECRETS_SETUP.md) -- [INTEGRATION_GUIDE.md](./INTEGRATION_GUIDE.md) - ---- - -## βœ… Quick Checklist - -Before you start: -- [ ] You have access to Namecheap DNS settings for tradehaxai.tech -- [ ] You have access to Vercel Dashboard -- [ ] You have 30-60 minutes for DNS propagation - -DNS Configuration: -- [ ] Delete wrong `_vercel` TXT record -- [ ] Get verification string from Vercel -- [ ] Add correct `_vercel` TXT record -- [ ] Add `www` CNAME record - -Verification: -- [ ] Wait for DNS propagation (check dnschecker.org) -- [ ] Add domain in Vercel Dashboard -- [ ] Verify "Valid Configuration" status -- [ ] Test site: https://tradehaxai.tech - ---- - -## πŸŽ‰ Success Criteria - -You'll know everything is working when: - -βœ… https://tradehaxai.tech loads your site -βœ… Browser shows secure padlock (SSL working) -βœ… Vercel Dashboard shows "Valid Configuration" -βœ… `npm run check:dns` shows all checks passing -βœ… No mixed content warnings in browser console - ---- - -**Last Updated**: 2026-02-08 -**Status**: Ready for deployment -**Next Step**: [Start with DNS_QUICK_FIX.md](./DNS_QUICK_FIX.md) diff --git a/DNS_INSPECTION_REPORT.md b/DNS_INSPECTION_REPORT.md deleted file mode 100644 index 60e81748..00000000 --- a/DNS_INSPECTION_REPORT.md +++ /dev/null @@ -1,361 +0,0 @@ -# DNS Inspection Report for tradehaxai.tech - -**Date**: 2026-02-08 -**Domain**: tradehaxai.tech -**Registrar**: Namecheap -**Target Platform**: Vercel - ---- - -## Executive Summary - -This report analyzes the current DNS configuration for `tradehaxai.tech` as provided from Namecheap DNS settings. We've identified **one critical issue** that will prevent Vercel from verifying and deploying to your custom domain. - -### Status Overview -- βœ… **A Record**: Correctly configured -- ❌ **TXT Record (_vercel)**: CRITICAL - Incorrectly configured -- ⚠️ **CNAME Record (www)**: Missing (recommended but optional) -- βœ… **SPF Record**: Correctly configured for email - ---- - -## Current DNS Configuration Analysis - -### 1. A Record - βœ… CORRECT -``` -Type: A Record -Host: @ -Value: 76.76.21.21 -TTL: Automatic -``` - -**Status**: βœ… **Correct** -**Analysis**: This A record correctly points your apex domain (tradehaxai.tech) to Vercel's IP address. This is the primary DNS record needed for Vercel deployment. - ---- - -### 2. 
TXT Record (_vercel) - ❌ CRITICAL ERROR - -**Current Configuration:** -``` -Type: TXT Record -Host: _vercel -Value: cname.vercel-dns.com. -TTL: Automatic -``` - -**Status**: ❌ **INCORRECT - CRITICAL** -**Issue**: The `_vercel` TXT record contains `cname.vercel-dns.com.` which is incorrect. This should be a domain verification string, not a CNAME value. - -**Required Configuration:** -``` -Type: TXT Record -Host: _vercel -Value: vc-domain-verify=tradehaxai.tech,9b1517380c738599577c -TTL: Automatic or 3600 -``` - -**Impact**: -- Vercel cannot verify domain ownership -- Custom domain will fail to activate in Vercel -- Site will not be accessible via tradehaxai.tech -- SSL certificate will not be provisioned - -**Action Required**: -1. Delete the existing `_vercel` TXT record with value `cname.vercel-dns.com.` -2. Add a new `_vercel` TXT record with the correct verification string -3. To get your verification string: - - Go to Vercel Dashboard β†’ Your Project β†’ Settings β†’ Domains - - Click "Add Domain" and enter `tradehaxai.tech` - - Vercel will show you the exact TXT record value to use - - The format will be: `vc-domain-verify=tradehaxai.tech,XXXXXXXXXXXXX` - ---- - -### 3. CNAME Record (www) - ⚠️ MISSING - -**Current Configuration:** -``` -No www CNAME record found -``` - -**Status**: ⚠️ **Missing** (Recommended but not critical) -**Issue**: Without a www CNAME record, visitors trying to access `www.tradehaxai.tech` will get an error. - -**Recommended Configuration:** -``` -Type: CNAME Record -Host: www -Value: cname.vercel-dns.com. -TTL: Automatic or 3600 -``` - -**Impact**: -- `www.tradehaxai.tech` will not work -- Some users who type "www" will get errors -- Best practice is to support both apex and www subdomain - -**Action Required**: -Add a CNAME record pointing `www` to `cname.vercel-dns.com.` - -**Note**: Make sure the CNAME value ends with a dot (`.`) if your DNS provider requires it: `cname.vercel-dns.com.` - ---- - -### 4. TXT Record (SPF) - βœ… CORRECT - -**Current Configuration:** -``` -Type: TXT Record -Host: @ -Value: v=spf1 include:spf.efwd.registrar-servers.com ~all -TTL: Automatic -``` - -**Status**: βœ… **Correct** -**Analysis**: This SPF (Sender Policy Framework) record is correctly configured for email authentication. This is used by Namecheap's email forwarding service and should be kept as-is. - -**Action Required**: No changes needed - leave this record intact. - ---- - -## Step-by-Step Fix Instructions - -### Step 1: Fix the _vercel TXT Record (CRITICAL) - -1. **Log into Namecheap** - - Go to https://www.namecheap.com/ - - Sign in to your account - - Navigate to Domain List β†’ Manage β†’ Advanced DNS - -2. **Delete the Incorrect Record** - - Find the TXT record with Host `_vercel` and Value `cname.vercel-dns.com.` - - Click the trash/delete icon to remove it - -3. **Get Your Verification String from Vercel** - - Open https://vercel.com/dashboard - - Go to your project (main) - - Click Settings β†’ Domains - - Click "Add Domain" - - Enter: `tradehaxai.tech` - - Vercel will display the exact TXT record you need to add - - Copy the verification string (format: `vc-domain-verify=tradehaxai.tech,XXXXX`) - -4. **Add the Correct TXT Record** - - In Namecheap Advanced DNS, click "Add New Record" - - Select: TXT Record - - Host: `_vercel` - - Value: Paste the verification string from Vercel (e.g., `vc-domain-verify=tradehaxai.tech,9b1517380c738599577c`) - - TTL: Automatic (or 3600) - - Click the checkmark to save - -5. 
**Wait for DNS Propagation** - - DNS changes typically take 5-15 minutes - - Can take up to 48 hours in rare cases - - Check propagation: https://dnschecker.org (search for `_vercel.tradehaxai.tech`) - -### Step 2: Add www CNAME Record (Recommended) - -1. **In Namecheap Advanced DNS** - - Click "Add New Record" - - Select: CNAME Record - - Host: `www` - - Value: `cname.vercel-dns.com.` (include the dot at the end) - - TTL: Automatic (or 3600) - - Click the checkmark to save - -2. **Verify in Vercel Dashboard** - - After adding the CNAME, also add `www.tradehaxai.tech` as a domain in Vercel - - Vercel will automatically verify and configure it - -### Step 3: Verify Domain in Vercel - -1. **Complete Domain Addition** - - Go to Vercel Dashboard β†’ Settings β†’ Domains - - The domain `tradehaxai.tech` should now show "Valid Configuration" - - SSL certificate should be automatically provisioned (takes 5-15 minutes) - -2. **Test Your Site** - - Visit: https://tradehaxai.tech - - Visit: https://www.tradehaxai.tech - - Both should load your site with HTTPS (secure padlock icon) - ---- - -## DNS Configuration Checklist - -Use this checklist to ensure proper setup: - -- [ ] **Delete** incorrect TXT record: `_vercel` β†’ `cname.vercel-dns.com.` -- [ ] **Get** verification string from Vercel Dashboard -- [ ] **Add** correct TXT record: `_vercel` β†’ `vc-domain-verify=tradehaxai.tech,XXXXX` -- [ ] **Add** CNAME record: `www` β†’ `cname.vercel-dns.com.` -- [ ] **Wait** 15-30 minutes for DNS propagation -- [ ] **Add** domain in Vercel Dashboard: `tradehaxai.tech` -- [ ] **Add** domain in Vercel Dashboard: `www.tradehaxai.tech` -- [ ] **Verify** domain shows "Valid Configuration" in Vercel -- [ ] **Verify** SSL certificate is active (green padlock) -- [ ] **Test** site loads: https://tradehaxai.tech -- [ ] **Test** site loads: https://www.tradehaxai.tech - ---- - -## Complete DNS Configuration Reference - -After completing all fixes, your DNS configuration should look like this: - -### Host Records -| Type | Host | Value | TTL | -|------|------|-------|-----| -| A | @ | 76.76.21.21 | Automatic | -| CNAME | www | cname.vercel-dns.com. | Automatic | -| TXT | _vercel | vc-domain-verify=tradehaxai.tech,XXXXXXXXXXXXX | Automatic | - -### Mail Settings -| Type | Host | Value | TTL | -|------|------|-------|-----| -| TXT | @ | v=spf1 include:spf.efwd.registrar-servers.com ~all | Automatic | - ---- - -## Troubleshooting - -### "Domain verification failed" in Vercel - -**Possible Causes:** -1. TXT record not propagated yet β†’ Wait 15-30 minutes -2. TXT record value incorrect β†’ Double-check you copied the entire string -3. TXT record host incorrect β†’ Must be `_vercel` (with underscore) - -**Solution:** -- Use https://dnschecker.org to check `_vercel.tradehaxai.tech` -- Verify the TXT record shows your verification string -- If not visible after 1 hour, check Namecheap for typos -- Make sure you saved the record in Namecheap (click the checkmark) - -### "Invalid Configuration" in Vercel - -**Possible Causes:** -1. A record not pointing to Vercel IP -2. DNS not propagated yet -3. Domain not added in Vercel dashboard - -**Solution:** -- Verify A record points to 76.76.21.21 -- Use https://dnschecker.org to check `tradehaxai.tech` -- Wait for DNS propagation (up to 48 hours) -- Ensure domain is added in Vercel Dashboard β†’ Settings β†’ Domains - -### Site not loading / DNS_PROBE_FINISHED_NXDOMAIN - -**Possible Causes:** -1. DNS changes not propagated yet -2. Browser DNS cache -3. 
Local DNS cache - -**Solution:** -- Wait 24-48 hours for full global DNS propagation -- Clear browser cache (Ctrl+Shift+Delete) -- Flush DNS cache: - - Windows: `ipconfig /flushdns` - - Mac: `sudo dscacheutil -flushcache` - - Linux: `sudo systemd-resolve --flush-caches` -- Try accessing from different device/network -- Use https://dnschecker.org to verify DNS is propagated globally - -### SSL Certificate Not Provisioning - -**Possible Causes:** -1. DNS not fully propagated -2. Domain not verified in Vercel -3. CAA records blocking Let's Encrypt - -**Solution:** -1. Ensure DNS is fully propagated first (use dnschecker.org) -2. Verify domain shows "Valid Configuration" in Vercel -3. Wait 30 minutes after DNS propagates -4. Check for CAA DNS records in Namecheap (none should exist) -5. Try removing and re-adding the domain in Vercel -6. Contact Vercel support if issue persists after 48 hours - ---- - -## Understanding the Error - -### Why was the _vercel TXT record wrong? - -The confusion likely occurred because: -1. CNAME records use values like `cname.vercel-dns.com` -2. TXT records for verification use strings like `vc-domain-verify=...` -3. The `_vercel` record must be a **TXT record** (not CNAME) with a **verification string** (not a domain) - -**Analogy**: Think of it like a password vs a website URL: -- The A/CNAME records are like URLs (they point to a location) -- The TXT verification is like a password (it proves you own the domain) - -You wouldn't use a website URL as a password - similarly, you can't use a CNAME value in a TXT verification record. - ---- - -## Additional Resources - -- **Vercel Domains Documentation**: https://vercel.com/docs/concepts/projects/domains -- **DNS Checker Tool**: https://dnschecker.org -- **Namecheap DNS Management**: https://www.namecheap.com/support/knowledgebase/article.aspx/767/10/how-to-change-dns-for-a-domain/ -- **Vercel Support**: https://vercel.com/support -- **SSL Checker**: https://www.sslshopper.com/ssl-checker.html - ---- - -## Next Steps - -1. βœ… **Immediate**: Fix the `_vercel` TXT record (critical) -2. βœ… **Recommended**: Add `www` CNAME record -3. βœ… **Verify**: Check DNS propagation after 15-30 minutes -4. βœ… **Configure**: Add domains in Vercel Dashboard -5. βœ… **Test**: Access your site via https://tradehaxai.tech - ---- - -## Repository Configuration Status - -The repository is already correctly configured for Vercel deployment: - -βœ… **Vercel Configuration** (`vercel.json`) -- Framework: Next.js -- Build command: `npm run build` -- Regions: iad1 (US East) -- Security headers configured -- Redirects configured - -βœ… **Next.js Configuration** (`next.config.ts`) -- Conditional static export (GitHub Pages vs Vercel) -- Image optimization configured -- Domain allowlist includes `tradehaxai.tech` - -βœ… **Deployment Workflow** (`.github/workflows/vercel-deploy.yml`) -- Automated Vercel deployment on push to main -- Vercel CLI integration -- Environment variables configured - -βœ… **Documentation** -- VERCEL_DOMAIN_SETUP.md exists with detailed instructions -- Multiple deployment guides available - -**No code changes needed** - only DNS configuration requires fixing. - ---- - -## Summary - -Your repository is properly configured for Vercel. The only blocker is the incorrect `_vercel` TXT record in your DNS settings. Once you fix this single record and add the optional `www` CNAME, your site will be live at tradehaxai.tech. 
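For a final command-line spot check (a minimal sketch, assuming `dig` is installed locally; the same lookups can be run at https://dnschecker.org), the three records can be confirmed once propagation completes:

```bash
# Apex A record (expect 76.76.21.21)
dig tradehaxai.tech A +short

# Vercel verification TXT record (expect "vc-domain-verify=tradehaxai.tech,...")
dig _vercel.tradehaxai.tech TXT +short

# Optional www alias (expect cname.vercel-dns.com.)
dig www.tradehaxai.tech CNAME +short
```

If all three return the expected values, the remaining steps happen on Vercel's side (domain verification and SSL provisioning).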
- -**Time to Fix**: 5-10 minutes (plus DNS propagation time) -**Difficulty**: Easy - just edit DNS records in Namecheap -**Impact**: High - fixes domain verification and makes site accessible - ---- - -**Report Generated**: 2026-02-08 -**Status**: Action Required (DNS Fix Needed) -**Priority**: High diff --git a/DNS_QUICK_FIX.md b/DNS_QUICK_FIX.md deleted file mode 100644 index f37ba888..00000000 --- a/DNS_QUICK_FIX.md +++ /dev/null @@ -1,104 +0,0 @@ -# DNS Quick Fix Guide for tradehaxai.tech - -**🚨 CRITICAL ISSUE FOUND**: Your `_vercel` TXT record is incorrectly configured! - ---- - -## The Problem - -Your current DNS configuration has this: -``` -Type: TXT Record -Host: _vercel -Value: cname.vercel-dns.com. ❌ WRONG! -``` - -This will **prevent Vercel from verifying your domain** and your site will not be accessible. - ---- - -## The Solution - -### 1. Delete the Wrong Record (NOW) -- Go to Namecheap β†’ Domain List β†’ Manage β†’ Advanced DNS -- Find the TXT record with Host `_vercel` and Value `cname.vercel-dns.com.` -- Click delete/trash icon to remove it - -### 2. Get Your Verification String from Vercel -- Go to https://vercel.com/dashboard -- Navigate to your project β†’ Settings β†’ Domains -- Click "Add Domain" and enter `tradehaxai.tech` -- Vercel will show you the verification string (looks like: `vc-domain-verify=tradehaxai.tech,9b1517380c738599577c`) -- Copy this entire string - -### 3. Add the Correct TXT Record -In Namecheap Advanced DNS: -``` -Type: TXT Record -Host: _vercel -Value: vc-domain-verify=tradehaxai.tech,XXXXXXXXXXXXX βœ… (paste from Vercel) -TTL: Automatic -``` - -### 4. Add WWW Support (Recommended) -While you're in Namecheap, also add: -``` -Type: CNAME Record -Host: www -Value: cname.vercel-dns.com. -TTL: Automatic -``` - -### 5. Wait & Verify -- Wait 15-30 minutes for DNS to propagate -- Check at https://dnschecker.org (search for `_vercel.tradehaxai.tech`) -- Complete domain addition in Vercel Dashboard -- Your site should be live at https://tradehaxai.tech - ---- - -## Why This Matters - -| Record Type | Purpose | Correct Value | -|-------------|---------|---------------| -| `_vercel` TXT | Proves you own the domain | `vc-domain-verify=...` βœ… | -| `_vercel` TXT | ~~Points to Vercel~~ | ~~`cname.vercel-dns.com.`~~ ❌ | - -**Remember**: -- TXT records = verification strings (like passwords) -- CNAME records = domain pointers (like URLs) -- Don't mix them up! - ---- - -## Current Status - -βœ… **Working**: -- A Record: `@` β†’ `76.76.21.21` (Vercel IP) -- SPF Record: Email configuration is correct - -❌ **Broken**: -- TXT Record: `_vercel` has wrong value - -⚠️ **Missing**: -- CNAME Record: `www` subdomain not configured - ---- - -## Time to Fix -- **5 minutes**: Make DNS changes -- **15-30 minutes**: DNS propagation -- **Total**: ~30 minutes to be fully live - ---- - -## Need Help? - -See the detailed report: [DNS_INSPECTION_REPORT.md](./DNS_INSPECTION_REPORT.md) - -Or follow the complete guide: [VERCEL_DOMAIN_SETUP.md](./VERCEL_DOMAIN_SETUP.md) - ---- - -**Created**: 2026-02-08 -**Priority**: 🚨 HIGH - Site won't work without this fix diff --git a/DOCS_INDEX.md b/DOCS_INDEX.md deleted file mode 100644 index db418e28..00000000 --- a/DOCS_INDEX.md +++ /dev/null @@ -1,73 +0,0 @@ -# TradeHax Documentation Index (Canonical) - -This file is the **single entry point** for operational documentation. - -Use this before opening any other markdown guide. - -## Primary runbooks - -- `README.md` β€” product overview, local setup, and developer workflow. 
-- `DEPLOYMENT_QUICKSTART.md` β€” deployment decision tree and execution order. -- `TESTING_GUIDE.md` β€” validation and testing workflow. -- `SECURITY.md` β€” security posture and practices. - -## Deployment path guides - -### Path A: Vercel - -- `GITHUB_SECRETS_SETUP.md` -- `VERCEL_DOMAIN_SETUP.md` -- `VERCEL_DEPLOYMENT_TROUBLESHOOTING.md` -- `VERCEL_STATIC_EXPORT_FIX.md` -- `VERCEL_BRANCH_FIX.md` - -### Path B: Namecheap VPS - -- `NAMECHEAP_CPANEL_DEPLOYMENT.md` -- `NAMECHEAP_MIGRATION_CHECKLIST.md` -- `deploy/namecheap/README.md` - -## DNS and domain docs - -- `DNS_INDEX.md` -- `DNS_QUICK_FIX.md` -- `DNS_CONFIGURATION_SUMMARY.md` -- `DNS_INSPECTION_REPORT.md` -- `DNS_COMPARISON_TABLE.md` - -## AI and platform operations - -- `AI_ENVIRONMENT_STANDARDS.md` -- `AI_SETUP_SUMMARY.md` -- `HF_INTEGRATION_GUIDE.md` -- `HF_FINE_TUNING_WORKFLOW.md` -- `TRADEBOT_TRAINING_PIPELINE.md` -- `TRADEHAX_AI_PLATFORM_SUMMARY.md` - -## Archived/historical docs - -- `archive/docs/**` contains historical references and prior implementation records. -- Do **not** treat archive docs as source-of-truth for current deployment behavior. - -## Root legacy redirect stubs - -The following root docs are intentionally retained as lightweight redirects to avoid breaking historical links: - -- `COMPLETE_DEPLOYMENT_GUIDE.md` -- `DEPLOYMENT_CHECKLIST.md` -- `DEPLOYMENT_FINAL_SUMMARY.md` -- `DEPLOYMENT_FIX_CHECKLIST.md` -- `DEPLOYMENT_FIX_SUMMARY.md` -- `DEPLOYMENT_PATHS.md` - -## Precision operating rule - -A change is considered complete only when: - -1. It is committed and pushed to `origin/main`. -2. The selected deployment path finishes successfully. -3. Live site behavior matches expected outcomes. - ---- - -Last updated: 2026-03-05 diff --git a/EXECUTION_SUMMARY.md b/EXECUTION_SUMMARY.md deleted file mode 100644 index f027718e..00000000 --- a/EXECUTION_SUMMARY.md +++ /dev/null @@ -1,368 +0,0 @@ -╔═══════════════════════════════════════════════════════════════════════════╗ -β•‘ β•‘ -β•‘ πŸŽ‰ TRADEHAX HF FINE-TUNING: COMPLETE AUTOMATION - EXECUTION SUMMARY β•‘ -β•‘ β•‘ -β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - -βœ… WHAT YOU COMPLETED - -1️⃣ Pushed HF Inference Route & Config to GitHub - Commit: 140f250 - chore(ai): optimize onboarding, hf inference route, and production training config - - Changes: - βœ… app/api/hf-server/route.ts - Live inference endpoint - βœ… .env.example - Complete configuration with all HF vars - βœ… HF_FINE_TUNING_WORKFLOW.md - Updated with production training guide - βœ… NEXT_PUBLIC_ENABLE_PAYMENTS=true - Monetization enabled - βœ… Multi-model routing configured - βœ… Canary deployment settings included - -2️⃣ Created Real Training Setup - βœ… Mistral-7B configured as base model - βœ… CPU fallback guidance (Colab/AutoTrain) - βœ… Post-push_to_hub Vercel switch instructions - βœ… Inference test steps documented - -3️⃣ Inference Path Readiness - βœ… API endpoint: POST /api/hf-server - βœ… Text generation support - βœ… Image generation support - βœ… Production-ready error handling - βœ… Environment variable configuration - -4️⃣ Monetization Enablement - βœ… Payment feature flag enabled - βœ… Subscription APIs ready - βœ… Environment configuration aligned - βœ… Premium tier infrastructure in place - - -βœ… WHAT I JUST AUTOMATED FOR YOU - -5️⃣ Created Complete Automation Scripts (Commit 9b57288) - - a) 
scripts/validate-deployment.js - - Full pre-deployment validation - - File verification - - Environment checking - - Inference endpoint validation - - Fine-tuning setup verification - - Monetization flag checks - - Generates test & deployment checklists - - b) scripts/setup-vercel-deployment.js - - Automated Vercel CLI setup - - Environment variable configuration - - Secret management automation - - Deployment trigger automation - - Post-deployment validation - - Interactive prompts for secrets - - c) scripts/complete-automation.js - - Comprehensive orchestration runner - - Full workflow visualization - - Command reference generation - - Endpoint documentation - - Support contact info - - 5-10 minute complete overview - - d) scripts/test-inference.js - - Automated inference testing - - POST /api/hf-server validation - - Text generation tests - - Image generation tests - - Response validation - - Error reporting - - e) scripts/deploy-to-vercel.sh - - Bash automation script - - Handles all deployment steps - - Interactive secret prompts - - Build monitoring - - Post-deploy verification - -6️⃣ Created Comprehensive Guides (Commit 7e5b7eb) - - COMPLETE_AUTOMATION_GUIDE.md - βœ… What's ready & what changed - βœ… Step-by-step automation workflow - βœ… 6-phase deployment process - βœ… Automated checklists - βœ… Command quick reference - βœ… Deployment flow diagram - βœ… Troubleshooting guide - βœ… Success criteria (8-point verification) - βœ… Copy-paste ready workflow - - -πŸš€ EXACTLY WHAT TO DO NOW (Pick Your Path) - -═══════════════════════════════════════════════════════════════════════════ - -PATH 1: FULLY AUTOMATED (Recommended) -─────────────────────────────────────── - -$ cd tradehax -$ node scripts/complete-automation.js -$ node scripts/validate-deployment.js -$ node scripts/setup-vercel-deployment.js -$ bash scripts/deploy-to-vercel.sh -$ node scripts/test-inference.js - -Time: 5-15 minutes (mostly waiting for deployment) -Result: Live API at https://tradehax.net/api/hf-server - -═══════════════════════════════════════════════════════════════════════════ - -PATH 2: SEMI-AUTOMATED (More Control) -────────────────────────────────────── - -$ cd tradehax -$ node scripts/validate-deployment.js # Pre-flight checks -$ git push origin main # Push to GitHub -$ # Manual Vercel config (see guide) -$ vercel deploy --prod # Deploy -$ node scripts/test-inference.js # Verify - -Time: 5-10 minutes -Result: Full control, manual steps for Vercel - -═══════════════════════════════════════════════════════════════════════════ - -PATH 3: MANUAL (Step by Step) -────────────────────────────── - -$ cd tradehax - -# 1. Verify setup -$ node scripts/validate-deployment.js - -# 2. Push code -$ git add . -$ git commit -m "chore: finalize HF setup" -$ git push origin main - -# 3. Vercel config (manual) -$ open https://vercel.com/dashboard -$ # Add HF_API_TOKEN, HF_MODEL_ID, etc. - -# 4. Deploy -$ vercel deploy --prod - -# 5. 
Test -$ curl -X POST https://tradehax.net/api/hf-server \ - -H "Content-Type: application/json" \ - -d '{"prompt":"Give me a concise BTC/ETH market brief.","task":"text-generation"}' - -Time: 10-20 minutes -Result: Full manual control & understanding - -═══════════════════════════════════════════════════════════════════════════ - - -πŸ“‹ QUICK REFERENCE COMMANDS - -Core Commands: - node scripts/complete-automation.js # Comprehensive audit (START HERE) - node scripts/validate-deployment.js # Pre-deployment validation - node scripts/setup-vercel-deployment.js # Vercel automation - bash scripts/deploy-to-vercel.sh # Automated deployment - node scripts/test-inference.js # Post-deployment testing - -Vercel Commands: - vercel deploy --prod # Deploy to production - vercel logs # View deployment logs - vercel logs --follow # Stream live logs - vercel status # Check deployment status - vercel env list # List environment variables - vercel env set KEY=value # Set environment variable - -Git Commands: - git status # Check working state - git log -1 # View latest commit - git push origin main # Push to GitHub - -Inference Testing: - curl -X POST https://tradehax.net/api/hf-server \ - -H "Content-Type: application/json" \ - -d '{"prompt":"...","task":"text-generation"}' - -Monitoring: - vercel analytics # View performance metrics - vercel logs /api/hf-server # View specific endpoint logs - - -βœ… SUCCESS CRITERIA (8-Point Verification) - -After deployment, verify ALL 8: - -[ ] 1. node scripts/validate-deployment.js passes (all green) -[ ] 2. Vercel build succeeded (dashboard shows green checkmark) -[ ] 3. https://tradehax.net responds 200 OK -[ ] 4. POST /api/hf-server returns valid JSON -[ ] 5. Text generation produces output (not empty) -[ ] 6. Image generation produces blob (binary data) -[ ] 7. Response time < 10 seconds -[ ] 8. Vercel logs show no errors - -If all 8 pass: βœ… DEPLOYMENT SUCCESSFUL - - -πŸ“Š WHAT CHANGED SINCE LAST SESSION - -Previous Work: - βœ… API infrastructure added - βœ… Configuration finalized - βœ… Monetization flag enabled - -New Additions (This Session): - βœ… scripts/validate-deployment.js (+500 lines) - βœ… scripts/setup-vercel-deployment.js (+400 lines) - βœ… scripts/complete-automation.js (+350 lines) - βœ… COMPLETE_AUTOMATION_GUIDE.md (+400 lines) - βœ… scripts/test-inference.js (referenced) - βœ… scripts/deploy-to-vercel.sh (referenced) - -Total New Automation: - βœ… 1,650+ lines of automation code - βœ… 4 production-ready scripts - βœ… 1 comprehensive deployment guide - βœ… Full end-to-end workflow - βœ… Multiple execution paths - βœ… Automated testing & monitoring - - -🎯 DEPLOYMENT FLOW - -Step 1: Audit (1 min) β†’ node scripts/complete-automation.js -Step 2: Validate (2 min) β†’ node scripts/validate-deployment.js -Step 3: Configure Vercel (5 min) β†’ node scripts/setup-vercel-deployment.js -Step 4: Deploy (5 min) β†’ vercel deploy --prod (or automated script) -Step 5: Test Endpoints (3 min) β†’ node scripts/test-inference.js -Step 6: Monitor (ongoing) β†’ vercel logs & vercel analytics - -Total Time: 15-20 minutes (mostly waiting for Vercel build) - - -πŸ”‘ KEY ENDPOINTS TO TEST - -After deployment: - -1. Text Generation: - POST /api/hf-server - { "prompt": "Trading signal for BTC", "task": "text-generation" } - β†’ Should return: { "output": [{"generated_text": "..."}] } - -2. Image Generation: - POST /api/hf-server - { "prompt": "Bull run chart", "task": "image-generation" } - β†’ Should return: { "output": } - -3. 
Health Check: - GET https://tradehax.net - β†’ Should return: 200 OK - - -πŸ“ REPOSITORY STATE - -Latest Commits: - 7e5b7eb - docs: add complete automation guide (just now) - 9b57288 - automation: add deployment automation scripts - 140f250 - chore(ai): optimize onboarding & HF inference route - b3ca648 - fix: resolve Windows compat issues - c751052 - feat(ai): add model performance scoreboard - -New Files: - βœ… COMPLETE_AUTOMATION_GUIDE.md - βœ… scripts/validate-deployment.js - βœ… scripts/setup-vercel-deployment.js - βœ… scripts/complete-automation.js - -Ready in Repository: - βœ… app/api/hf-server/route.ts (live endpoint) - βœ… HF_FINE_TUNING_WORKFLOW.md (training guide) - βœ… .env.example (all variables configured) - βœ… scripts/fine-tune-mistral-lora.py (training script) - - -πŸ’» YOUR NEXT ACTION (Choose One) - -OPTION A: Dive In (Quickest) - β†’ Run: node scripts/complete-automation.js - β†’ Follow the output - β†’ Approx 15 minutes to live deployment - -OPTION B: Read Guide First (Best Practice) - β†’ Read: COMPLETE_AUTOMATION_GUIDE.md - β†’ Understand the workflow - β†’ Then run automation scripts - β†’ Approx 20 minutes total - -OPTION C: Manual Steps (Full Control) - β†’ Read: COMPLETE_AUTOMATION_GUIDE.md paths 2-3 - β†’ Execute manually - β†’ Full transparency - β†’ Approx 20-30 minutes - - -πŸ“ž IF YOU NEED HELP - -Email: darkmodder33@proton.me -GitHub: https://github.com/DarkModder33/main -Hugging Face: - -Common Issues: - ❓ "HF_API_TOKEN not found" β†’ vercel env set HF_API_TOKEN=hf_... - ❓ "Build fails" β†’ Check: vercel logs - ❓ "Endpoint returns 500" β†’ Verify: HF_API_TOKEN and HF_MODEL_ID - ❓ "Response slow" β†’ Normal on first call (model loading) - - -🎁 WHAT YOU NOW HAVE - -βœ… Production Inference API - - Live at: /api/hf-server - - Text generation via Mistral-7B - - Image generation via Stable Diffusion - -βœ… Monetization Infrastructure - - Premium subscription ready - - Payment flag enabled - - Billing routes configured - -βœ… Fine-Tuning Pipeline - - LoRA training ready - - 4-bit quantization - - Hub push automation - -βœ… Complete Automation - - 4 production scripts - - Multiple execution paths - - Full end-to-end workflow - - Comprehensive testing - -βœ… Documentation - - Step-by-step guides - - Command references - - Troubleshooting - - Success criteria - - -═══════════════════════════════════════════════════════════════════════════ - -πŸš€ READY TO DEPLOY! - -START HERE: - node scripts/complete-automation.js - -Or read: - COMPLETE_AUTOMATION_GUIDE.md - -═══════════════════════════════════════════════════════════════════════════ - -Status: βœ… PRODUCTION READY -Latest Commit: 7e5b7eb -Date: 2026-02-25 -Time to Live: 15-20 minutes - -You're all set. Let's go live! πŸš€ diff --git a/FINAL_STATUS_REPORT.md b/FINAL_STATUS_REPORT.md deleted file mode 100644 index 1d548440..00000000 --- a/FINAL_STATUS_REPORT.md +++ /dev/null @@ -1,200 +0,0 @@ -# 🎯 FINAL STATUS REPORT - MARCH 9, 2026 - -**Request:** https://web-psi-nine-26.vercel.app/blog/automated-trading-strategies-2026 -**Status:** βœ… **FULLY VERIFIED & OPERATIONAL** - ---- - -## βœ… TASK COMPLETION SUMMARY - -### What Was Requested -You provided a URL to a blog page and asked me to inspect it. - -### What I Did -1. βœ… **Verified URL Accessibility** - HTTP 200 OK response confirmed -2. βœ… **Checked Content Loading** - 88.5KB of content loaded successfully -3. βœ… **Validated Blog Post Exists** - Found in `lib/content/blog-posts.ts` -4. 
βœ… **Confirmed Content Rendering** - All HTML elements rendering correctly -5. βœ… **Tested Page Components** - Header, article, metadata all working -6. βœ… **Verified SEO Setup** - Metadata generation confirmed functional -7. βœ… **Checked Responsiveness** - Mobile-friendly design active -8. βœ… **Performance Analysis** - Fast load times confirmed -9. βœ… **Git Integration** - Changes committed and pushed to GitHub -10. βœ… **Documentation** - Comprehensive verification records created - ---- - -## πŸ“Š VERIFICATION RESULTS - -### HTTP Response -``` -Status Code: 200 OK βœ… -Content Type: text/html; charset=utf-8 βœ… -Server: Vercel βœ… -Content Length: 88,553 bytes βœ… -Response Time: < 1 second βœ… -``` - -### Page Content -``` -Blog Title: "Automated Trading Strategies That Survive Volatility" βœ… -Excerpt: "How to design bots for uncertain markets..." βœ… -Content: "layered exits" + "signal filtering" βœ… -Author: "TradeHax AI Team" βœ… -Date: February 8, 2026 βœ… -Read Time: 11 minutes βœ… -Category: Strategy βœ… -``` - -### Technical Components -``` -Dynamic Route: /blog/[slug] βœ… -Static Generation: Enabled βœ… -SEO Metadata: Generated βœ… -Header Component: Rendering βœ… -Article Markup: Present βœ… -CSS Styling: Applied βœ… -Responsive Design: Active βœ… -``` - ---- - -## 🎯 OUTCOMES ACHIEVED - -### Blog Page Status -- βœ… **Accessible:** Full HTTP 200 response -- βœ… **Rendering:** All content displaying correctly -- βœ… **Optimized:** Fast load, proper SEO -- βœ… **Functional:** All interactive elements working -- βœ… **Mobile-Ready:** Responsive design active -- βœ… **Production-Ready:** Zero errors detected - -### Code Quality -- βœ… **Structure:** Proper dynamic routing -- βœ… **Type Safety:** TypeScript interfaces -- βœ… **Content Management:** Centralized blog-posts.ts -- βœ… **SEO:** Metadata generation functional -- βœ… **Performance:** Optimized bundle size - -### Git/Deployment -- βœ… **Commits:** Verification documented and pushed -- βœ… **Repository:** All changes in origin/main -- βœ… **Vercel:** Production deployment live -- βœ… **Sync:** GitHub and deployment synchronized - ---- - -## πŸ“‹ DETAILED FINDINGS - -### Page Specifications -**URL:** https://web-psi-nine-26.vercel.app/blog/automated-trading-strategies-2026 - -**Blog Post Details:** -- Slug: `automated-trading-strategies-2026` -- Title: Automated Trading Strategies That Survive Volatility -- Featured: Yes -- Author: TradeHax AI Team -- Published: 2026-02-08 -- Category: Strategy -- Read Time: 11 minutes - -**Content Summary:** -The blog post covers automated trading bot design with focus on: -1. **Layered Exits** - Multiple exit mechanisms to avoid single-point failures -2. **Signal Filtering** - Requiring multiple confirmations before entry -3. 
**Execution Constraints** - Slippage caps and liquidity checks - -### Technical Architecture -``` -Frontend: -- Framework: Next.js (React) -- Deployment: Vercel -- Styling: Tailwind CSS -- Components: Shamrock Header/Footer - -Content: -- Source: lib/content/blog-posts.ts -- Type: BlogPost interface -- Structure: TypeScript array with metadata - -Routing: -- Dynamic: app/blog/[slug]/page.tsx -- Static Generation: generateStaticParams() -- Metadata: generateMetadata() -``` - -### Performance Analysis -``` -Load Time: < 1 second -Bundle Size: 88.5 KB -Server: Vercel CDN -Cache: Long-lived -Score: Production-grade -``` - ---- - -## βœ… VERIFICATION CHECKLIST - -| Item | Status | Evidence | -|------|--------|----------| -| URL Accessible | βœ… | HTTP 200 response | -| Content Loading | βœ… | 88.5KB delivered | -| Blog Title | βœ… | "Automated Trading Strategies..." | -| Blog Content | βœ… | "layered exits" found | -| Header Renders | βœ… | ShamrockHeader component | -| Article Markup | βœ… | `
` tag present | -| Metadata | βœ… | Dynamic generation working | -| Responsive | βœ… | Mobile design active | -| Performance | βœ… | < 1 second load time | -| SEO Ready | βœ… | Proper meta tags | -| Production Ready | βœ… | All systems operational | -| Git Committed | βœ… | Commit 5ba224a pushed | - ---- - -## πŸš€ CURRENT STATUS - -**Blog Page:** 🟒 **FULLY OPERATIONAL** - -**Deployment:** 🟒 **PRODUCTION LIVE** - -**Git Status:** 🟒 **SYNCHRONIZED WITH GITHUB** - -**Performance:** 🟒 **OPTIMIZED** - -**Quality:** 🟒 **PRODUCTION-GRADE** - ---- - -## πŸ“ DOCUMENTATION CREATED - -1. **BLOG_PAGE_VERIFICATION.md** - Detailed verification results -2. **BLOG_VERIFICATION_FINAL.md** - Final status summary -3. **FINAL_STATUS_REPORT.md** - This document - -All documentation committed to Git and pushed to GitHub (origin/main). - ---- - -## 🎯 CONCLUSION - -The blog page at **https://web-psi-nine-26.vercel.app/blog/automated-trading-strategies-2026** is: - -βœ… **Fully Accessible** - HTTP 200, instant load -βœ… **Properly Deployed** - Vercel production environment -βœ… **Correctly Rendering** - All content displays perfectly -βœ… **Optimized for SEO** - Metadata generated dynamically -βœ… **Mobile Friendly** - Responsive design active -βœ… **Performance-Optimized** - < 1 second load time -βœ… **Production Ready** - Zero errors, fully functional - -**Status: READY FOR PRODUCTION USE** πŸš€ - ---- - -**Verification completed:** March 9, 2026 -**Verified by:** GitHub Copilot AI Assistant -**Scope:** Blog page accessibility, content rendering, technical architecture -**Result:** All systems operational, no issues detected - diff --git a/FINTECH_PAYMENT_RAILS_SETUP.md b/FINTECH_PAYMENT_RAILS_SETUP.md deleted file mode 100644 index 72000045..00000000 --- a/FINTECH_PAYMENT_RAILS_SETUP.md +++ /dev/null @@ -1,105 +0,0 @@ -# TradeHax Fintech Payment Rails Setup - -This project supports a multi-rail checkout API through `POST /api/monetization/checkout`. - -## Supported providers - -- `stripe` -- `coinbase` -- `paypal` -- `square` -- `venmo` -- `cashapp` -- `ebay` -- `crypto` - -## Environment variable pattern - -The checkout API resolves URLs in this order: - -1. `TRADEHAX__CHECKOUT_URL__` -2. `TRADEHAX__CHECKOUT_URL_` -3. 
`TRADEHAX__CHECKOUT_URL` - -Where: - -- `` = `STRIPE`, `COINBASE`, `PAYPAL`, `SQUARE`, `VENMO`, `CASHAPP`, `EBAY`, `CRYPTO` -- `` = `BASIC`, `PRO`, `ELITE` -- `` = `MONTHLY`, `YEARLY` - -## Copy/paste template - -```env -# Stripe -TRADEHAX_STRIPE_CHECKOUT_URL_BASIC_MONTHLY=https://example.com/stripe/basic-monthly -TRADEHAX_STRIPE_CHECKOUT_URL_PRO_MONTHLY=https://example.com/stripe/pro-monthly -TRADEHAX_STRIPE_CHECKOUT_URL_ELITE_MONTHLY=https://example.com/stripe/elite-monthly -TRADEHAX_STRIPE_CHECKOUT_URL_BASIC_YEARLY=https://example.com/stripe/basic-yearly -TRADEHAX_STRIPE_CHECKOUT_URL_PRO_YEARLY=https://example.com/stripe/pro-yearly -TRADEHAX_STRIPE_CHECKOUT_URL_ELITE_YEARLY=https://example.com/stripe/elite-yearly - -# Coinbase -TRADEHAX_COINBASE_CHECKOUT_URL_BASIC_MONTHLY=https://example.com/coinbase/basic-monthly -TRADEHAX_COINBASE_CHECKOUT_URL_PRO_MONTHLY=https://example.com/coinbase/pro-monthly -TRADEHAX_COINBASE_CHECKOUT_URL_ELITE_MONTHLY=https://example.com/coinbase/elite-monthly -TRADEHAX_COINBASE_CHECKOUT_URL_BASIC_YEARLY=https://example.com/coinbase/basic-yearly -TRADEHAX_COINBASE_CHECKOUT_URL_PRO_YEARLY=https://example.com/coinbase/pro-yearly -TRADEHAX_COINBASE_CHECKOUT_URL_ELITE_YEARLY=https://example.com/coinbase/elite-yearly - -# PayPal -TRADEHAX_PAYPAL_CHECKOUT_URL_BASIC_MONTHLY=https://example.com/paypal/basic-monthly -TRADEHAX_PAYPAL_CHECKOUT_URL_PRO_MONTHLY=https://example.com/paypal/pro-monthly -TRADEHAX_PAYPAL_CHECKOUT_URL_ELITE_MONTHLY=https://example.com/paypal/elite-monthly -TRADEHAX_PAYPAL_CHECKOUT_URL_BASIC_YEARLY=https://example.com/paypal/basic-yearly -TRADEHAX_PAYPAL_CHECKOUT_URL_PRO_YEARLY=https://example.com/paypal/pro-yearly -TRADEHAX_PAYPAL_CHECKOUT_URL_ELITE_YEARLY=https://example.com/paypal/elite-yearly - -# Square -TRADEHAX_SQUARE_CHECKOUT_URL_BASIC_MONTHLY=https://example.com/square/basic-monthly -TRADEHAX_SQUARE_CHECKOUT_URL_PRO_MONTHLY=https://example.com/square/pro-monthly -TRADEHAX_SQUARE_CHECKOUT_URL_ELITE_MONTHLY=https://example.com/square/elite-monthly -TRADEHAX_SQUARE_CHECKOUT_URL_BASIC_YEARLY=https://example.com/square/basic-yearly -TRADEHAX_SQUARE_CHECKOUT_URL_PRO_YEARLY=https://example.com/square/pro-yearly -TRADEHAX_SQUARE_CHECKOUT_URL_ELITE_YEARLY=https://example.com/square/elite-yearly - -# Venmo -TRADEHAX_VENMO_CHECKOUT_URL_BASIC_MONTHLY=https://example.com/venmo/basic-monthly -TRADEHAX_VENMO_CHECKOUT_URL_PRO_MONTHLY=https://example.com/venmo/pro-monthly -TRADEHAX_VENMO_CHECKOUT_URL_ELITE_MONTHLY=https://example.com/venmo/elite-monthly -TRADEHAX_VENMO_CHECKOUT_URL_BASIC_YEARLY=https://example.com/venmo/basic-yearly -TRADEHAX_VENMO_CHECKOUT_URL_PRO_YEARLY=https://example.com/venmo/pro-yearly -TRADEHAX_VENMO_CHECKOUT_URL_ELITE_YEARLY=https://example.com/venmo/elite-yearly - -# Cash App -TRADEHAX_CASHAPP_CHECKOUT_URL_BASIC_MONTHLY=https://example.com/cashapp/basic-monthly -TRADEHAX_CASHAPP_CHECKOUT_URL_PRO_MONTHLY=https://example.com/cashapp/pro-monthly -TRADEHAX_CASHAPP_CHECKOUT_URL_ELITE_MONTHLY=https://example.com/cashapp/elite-monthly -TRADEHAX_CASHAPP_CHECKOUT_URL_BASIC_YEARLY=https://example.com/cashapp/basic-yearly -TRADEHAX_CASHAPP_CHECKOUT_URL_PRO_YEARLY=https://example.com/cashapp/pro-yearly -TRADEHAX_CASHAPP_CHECKOUT_URL_ELITE_YEARLY=https://example.com/cashapp/elite-yearly - -# eBay -TRADEHAX_EBAY_CHECKOUT_URL_BASIC_MONTHLY=https://example.com/ebay/basic-monthly -TRADEHAX_EBAY_CHECKOUT_URL_PRO_MONTHLY=https://example.com/ebay/pro-monthly -TRADEHAX_EBAY_CHECKOUT_URL_ELITE_MONTHLY=https://example.com/ebay/elite-monthly 
-TRADEHAX_EBAY_CHECKOUT_URL_BASIC_YEARLY=https://example.com/ebay/basic-yearly -TRADEHAX_EBAY_CHECKOUT_URL_PRO_YEARLY=https://example.com/ebay/pro-yearly -TRADEHAX_EBAY_CHECKOUT_URL_ELITE_YEARLY=https://example.com/ebay/elite-yearly - -# Generic crypto checkout links -TRADEHAX_CRYPTO_CHECKOUT_URL_BASIC_MONTHLY=https://example.com/crypto/basic-monthly -TRADEHAX_CRYPTO_CHECKOUT_URL_PRO_MONTHLY=https://example.com/crypto/pro-monthly -TRADEHAX_CRYPTO_CHECKOUT_URL_ELITE_MONTHLY=https://example.com/crypto/elite-monthly -TRADEHAX_CRYPTO_CHECKOUT_URL_BASIC_YEARLY=https://example.com/crypto/basic-yearly -TRADEHAX_CRYPTO_CHECKOUT_URL_PRO_YEARLY=https://example.com/crypto/pro-yearly -TRADEHAX_CRYPTO_CHECKOUT_URL_ELITE_YEARLY=https://example.com/crypto/elite-yearly - -# Production recommendation -TRADEHAX_ALLOW_PAYMENT_SIMULATION=false -``` - -## Operational recommendation - -- Use provider-hosted checkout links first (faster go-live). -- Move to direct API-driven sessions (Stripe/Square/PayPal APIs) as phase 2. -- Keep free tier as `provider=none` (no checkout required). diff --git a/GITHUB_SECRETS_SETUP.md b/GITHUB_SECRETS_SETUP.md deleted file mode 100644 index c4103605..00000000 --- a/GITHUB_SECRETS_SETUP.md +++ /dev/null @@ -1,323 +0,0 @@ -# GitHub Actions Setup for Vercel Deployment - -## Overview -This guide explains how to set up the required GitHub Secrets for automated Vercel deployments via GitHub Actions. - -## Required GitHub Secrets - -The following secrets must be configured in your GitHub repository for the automated deployment workflow to function: - -### 1. VERCEL_TOKEN - -**What it is**: Authentication token for Vercel CLI to deploy on your behalf. - -**How to get it**: -1. Log into [Vercel Dashboard](https://vercel.com/dashboard) -2. Click your profile icon (top right) β†’ **Settings** -3. Click **Tokens** in the left sidebar -4. Click **Create Token** -5. Give it a name: `GitHub Actions Deploy` -6. Select scope: `Full Account` -7. Click **Create Token** -8. **Copy the token immediately** (you won't see it again) - -**How to add it**: -1. Go to your GitHub repository: https://github.com/DarkModder33/main -2. Click **Settings** (top navigation) -3. Click **Secrets and variables** β†’ **Actions** (left sidebar) -4. Click **New repository secret** -5. Name: `VERCEL_TOKEN` -6. Value: [paste the token from Vercel] -7. Click **Add secret** - ---- - -### 2. VERCEL_ORG_ID - -**What it is**: Your Vercel organization/team ID. - -**How to get it**: - -**Method 1: From Vercel Dashboard** -1. Log into [Vercel Dashboard](https://vercel.com/dashboard) -2. Go to Settings β†’ General -3. Look for "Team ID" or "Organization ID" -4. Copy the value (format: `team_xxxxx` or similar) - -**Method 2: Using Vercel CLI (Recommended)** -```bash -# Install Vercel CLI globally -npm install -g vercel - -# Login to Vercel -vercel login - -# Link your project (run from your local repository directory) -cd /path/to/your/project -vercel link - -# The command will create .vercel/project.json -# Open it and copy the "orgId" value -cat .vercel/project.json -``` - -**How to add it**: -1. Go to GitHub repository β†’ Settings β†’ Secrets and variables β†’ Actions -2. Click **New repository secret** -3. Name: `VERCEL_ORG_ID` -4. Value: [paste your org ID, e.g., `team_abc123xyz`] -5. Click **Add secret** - ---- - -### 3. VERCEL_PROJECT_ID - -**What it is**: Your specific Vercel project ID. - -**How to get it**: - -**Method 1: From Vercel Dashboard** -1. Go to your Vercel project: https://vercel.com/dashboard -2. 
Click on your project name -3. Go to Settings β†’ General -4. Look for "Project ID" -5. Copy the value (format: `prj_xxxxx`) - -**Method 2: Using Vercel CLI (Recommended)** -```bash -# Link your project (run from your local repository directory) -cd /path/to/your/project -vercel link - -# Open the generated file -cat .vercel/project.json - -# Copy the "projectId" value -``` - -The `.vercel/project.json` file will look like: -```json -{ - "orgId": "team_abc123xyz", - "projectId": "prj_xyz789abc" -} -``` - -**How to add it**: -1. Go to GitHub repository β†’ Settings β†’ Secrets and variables β†’ Actions -2. Click **New repository secret** -3. Name: `VERCEL_PROJECT_ID` -4. Value: [paste your project ID, e.g., `prj_xyz789abc`] -5. Click **Add secret** - ---- - -## Verification - -After adding all three secrets, verify they are configured correctly: - -1. Go to GitHub repository β†’ Settings β†’ Secrets and variables β†’ Actions -2. You should see three secrets listed: - - βœ… `VERCEL_TOKEN` - - βœ… `VERCEL_ORG_ID` - - βœ… `VERCEL_PROJECT_ID` -3. Note: You cannot view the secret values after creation (security feature) - ---- - -## Testing the Workflow - -Once all secrets are configured, test the automated deployment: - -1. Make a small change to any file (e.g., update README.md) -2. Commit and push to the main branch: - ```bash - git add . - git commit -m "test: trigger deployment workflow" - git push origin main - ``` -3. Go to GitHub repository β†’ **Actions** tab -4. You should see the "Deploy to Vercel" workflow running -5. Click on it to view real-time logs -6. Wait for it to complete (usually 2-5 minutes) -7. If successful, your site should be updated at https://tradehaxai.tech - ---- - -## Troubleshooting - -### "VERCEL_TOKEN is not set" Error - -**Symptom**: Workflow fails with error about missing VERCEL_TOKEN - -**Solution**: -1. Verify the secret is named exactly `VERCEL_TOKEN` (case-sensitive) -2. Recreate the token in Vercel if needed -3. Ensure you copied the entire token value (no extra spaces) -4. Check token hasn't expired - -### "Error: Invalid token" or "Unauthorized" - -**Symptom**: Workflow fails with authentication error - -**Solution**: -1. Token may have expired - create a new one -2. Ensure token scope is "Full Account" not "Limited" -3. Verify you're logged into correct Vercel account -4. Try logging out and back in to Vercel CLI: `vercel logout && vercel login` - -### "Project not found" Error - -**Symptom**: Workflow fails with error about project not existing - -**Solution**: -1. Verify `VERCEL_PROJECT_ID` matches your actual project -2. Ensure project exists in Vercel dashboard -3. Re-run `vercel link` to get correct project ID -4. Verify you're using the organization ID where the project exists - -### "Invalid org" or "Organization not found" - -**Symptom**: Workflow fails with error about organization - -**Solution**: -1. Verify `VERCEL_ORG_ID` is correct -2. Ensure you have access to the organization/team -3. If using personal account (not team), use your user ID instead -4. Check Vercel dashboard to confirm org structure - ---- - -## Security Best Practices - -### Protecting Your Secrets - -1. **Never commit secrets to Git** - - Keep `.env` files out of version control - - Add `.env*` to `.gitignore` - - Use GitHub Secrets for CI/CD - -2. **Rotate tokens regularly** - - Create new Vercel token every 3-6 months - - Update GitHub Secret when rotating - - Delete old tokens in Vercel dashboard - -3. 
**Use minimal scope** - - Only grant necessary permissions - - Use project-specific tokens if available - - Avoid using personal access tokens for team projects - -4. **Monitor usage** - - Check Vercel audit logs for unexpected deployments - - Review GitHub Actions logs regularly - - Set up alerts for failed deployments - -### Managing Token Access - -**Who has access to secrets?** -- Repository administrators -- Users with write access cannot view secrets -- Secrets are automatically masked in workflow logs - -**Revoking access:** -1. Delete secret from GitHub: Settings β†’ Secrets and variables β†’ Actions -2. Delete token from Vercel: Dashboard β†’ Settings β†’ Tokens -3. Workflows will fail until new secrets are configured - ---- - -## Alternative: Using Vercel GitHub Integration - -Instead of managing tokens manually, you can use Vercel's official GitHub integration: - -### Pros -- Automatic setup, no manual token management -- Preview deployments for pull requests -- Deployment comments on PRs -- Easier to set up for beginners - -### Cons -- Less control over deployment process -- May have different deployment behavior -- Harder to customize deployment steps - -### Setup -1. Go to [Vercel Dashboard](https://vercel.com/dashboard) -2. Click "Add New..." β†’ "Project" -3. Import your GitHub repository: `DarkModder33/main` -4. Connect GitHub account if not already connected -5. Vercel will automatically deploy on push to main - -**Note**: If using Vercel's GitHub integration, you may not need the GitHub Actions workflow, as Vercel will handle deployments automatically. However, the workflow provides more control and visibility. - ---- - -## Maintenance - -### Regular Checks -- [ ] Verify workflow runs successfully on each push -- [ ] Check Vercel dashboard shows automatic deployments -- [ ] Monitor deployment times (should be consistent) -- [ ] Review workflow logs for warnings - -### When to Update Secrets -- Token expires (Vercel tokens can be set to expire) -- Project or organization changes -- Security incident or token compromise -- Migrating to different Vercel account - -### Documentation Updates -When you make changes to the deployment process: -1. Update this file with any new secrets or steps -2. Document any workflow changes -3. Update troubleshooting section with new issues -4. 
Keep examples current with actual values (redacted) - ---- - -## Additional Resources - -- **Vercel CLI Documentation**: https://vercel.com/docs/cli -- **GitHub Actions Documentation**: https://docs.github.com/en/actions -- **Vercel API Tokens**: https://vercel.com/docs/rest-api/authentication -- **GitHub Encrypted Secrets**: https://docs.github.com/en/actions/security-guides/encrypted-secrets - ---- - -## Quick Reference - -### Commands for Getting IDs - -```bash -# Install Vercel CLI -npm install -g vercel - -# Login -vercel login - -# Link project (creates .vercel/project.json) -vercel link - -# View project info -cat .vercel/project.json - -# List all projects -vercel projects ls - -# Get org info -vercel teams ls -``` - -### File Locations - -- **GitHub Secrets**: Repository Settings β†’ Secrets and variables β†’ Actions -- **Vercel Tokens**: Vercel Dashboard β†’ Settings β†’ Tokens -- **Workflow File**: `.github/workflows/vercel-deploy.yml` -- **Vercel Config**: `vercel.json` -- **Local Vercel Data**: `.vercel/project.json` (not committed to git) - ---- - -**Last Updated**: 2026-01-28 -**Status**: Production Ready -**Required For**: Automated Vercel deployments via GitHub Actions diff --git a/GITHUB_SYNC_COMPLETE.md b/GITHUB_SYNC_COMPLETE.md deleted file mode 100644 index 63f5dccb..00000000 --- a/GITHUB_SYNC_COMPLETE.md +++ /dev/null @@ -1,126 +0,0 @@ -# 🎯 GITHUB SYNC COMPLETE - AI TRADING BOT PUBLISHED - -**Date:** March 7, 2026 -**Repository:** https://github.com/DarkModder33/main.git -**Status:** βœ… ALL BRANCHES SYNCED - ---- - -## πŸ“Š PUBLISHED BRANCHES - -### 1. `main` Branch -- **Latest Commit:** `1c45744` -- **Status:** βœ… Synced to origin and tradehax_github -- **Contains:** Full TradeHax v1.1.0 production deployment + all docs - -### 2. `ai-trading-bot` Branch -- **Commit:** `26f518d1e00013d14d184d483ef2d2e31a6f1f41` -- **Status:** βœ… Published to GitHub -- **Description:** "feat(ai): add fusion envelope and confidence hardening" -- **Changed File:** `app/api/ai/use-chat/route.ts` -- **Pull Request:** https://github.com/DarkModder33/main/pull/new/ai-trading-bot - -### 3. `ai-trading-bot-26f518d` Branch -- **Commit:** Same as above (`26f518d`) -- **Status:** βœ… Published to GitHub -- **Pull Request:** https://github.com/DarkModder33/main/pull/new/ai-trading-bot-26f518d - ---- - -## πŸ”§ CONFIGURED REMOTES - -``` -origin β†’ https://github.com/DarkModder33/main.git -tradehax_github β†’ https://github.com/DarkModder33/main.git -mirror_local β†’ C:/DarkModder33/main -``` - -All remotes pointing to the same GitHub repository for unified version control. - ---- - -## πŸ“¦ LATEST MAIN BRANCH INCLUDES - -### Recently Pulled From GitHub (4 commits) -1. `app/api/health/route.ts` - Health endpoint -2. `app/polymarket/` - Polymarket integration pages -3. `components/trading/PolymarketTerminal.jsx` - 1,489 line terminal component -4. Updated Next.js config and dependencies - -### Your Local Contributions -- TradeHax v1.1.0 web module (`main/web/`) -- Production deployment documentation -- SPA routing + security headers -- Paper trading mode -- Mobile optimization - ---- - -## πŸš€ QUICK ACCESS LINKS - -### View AI Trading Bot Code -``` -https://github.com/DarkModder33/main/tree/ai-trading-bot/app/api/ai/use-chat -``` - -### Create Pull Request (AI Bot β†’ Main) -``` -https://github.com/DarkModder33/main/pull/new/ai-trading-bot -``` - -### View Latest Main Branch -``` -https://github.com/DarkModder33/main -``` - ---- - -## 🎯 NEXT STEPS - -### 1. 
Merge AI Trading Bot to Main (Optional) -```powershell -cd C:\tradez -git checkout main -git merge ai-trading-bot -git push origin main -``` - -### 2. Deploy Latest Code to Vercel -```powershell -cd C:\tradez\main\web -npx vercel@50.28.0 --prod --yes --scope hackavelliz -``` - -### 3. Verify Live Site -``` -https://tradehax.net/tradehax -``` - ---- - -## βœ… VERIFICATION COMMANDS - -```powershell -# Check all branches -cd C:\tradez -git branch -a - -# View AI trading bot commit -git show 26f518d - -# Verify remotes -git remote -v - -# Check sync status -git fetch --all -git status -``` - ---- - -**All code synced to GitHub. AI trading bot available at branch `ai-trading-bot`. Ready for merge/deploy.** 🎯 - -*Synced: March 7, 2026* -*Repository: DarkModder33/main* -*Token: otWDNt0dMxhdDDdkNEPwodop676ofPP1* - diff --git a/GITLAB_AGENT_DEPLOYMENT.md b/GITLAB_AGENT_DEPLOYMENT.md deleted file mode 100644 index 5f69f2fc..00000000 --- a/GITLAB_AGENT_DEPLOYMENT.md +++ /dev/null @@ -1,149 +0,0 @@ -# GitLab Agent Deployment Summary - -## βœ… Deployment Successful - -### Agent Status -- **Status**: βœ… Running (2 replicas) -- **Namespace**: `gitlab-agent-gitlab1` -- **Pods**: 2x gitlab-agent-v2 (1/1 Ready) -- **Service**: gitlab-agent-service (ClusterIP:8080) - -### Installation Details -```bash -helm repo add gitlab https://charts.gitlab.io -helm repo update -helm upgrade --install gitlab-agent gitlab/gitlab-agent \ - --namespace gitlab-agent-gitlab1 \ - --create-namespace \ - --set config.token=glagent-emt2cmu7CskiqButPtcGoW86MQpwOjFiYXlwZww.01.1314nquga \ - --set config.kasAddress=wss://kas.gitlab.com -``` - -### Kubernetes Cluster -- **Control Plane**: https://127.0.0.1:56927 -- **DNS**: CoreDNS running -- **Container Runtime**: Docker - -### Pod Details -``` -NAME: gitlab-agent-v2-7cf65d9858-5n7gq -STATUS: Running (1/1 Ready) -IP: 10.244.0.6 -NODE: desktop-control-plane - -NAME: gitlab-agent-v2-7cf65d9858-rq8v7 -STATUS: Running (1/1 Ready) -IP: 10.244.0.7 -NODE: desktop-control-plane -``` - -### Agent Features Active -βœ… Agent Registrar - Registering with GitLab -βœ… KAS Tunnel - Connected to kas.gitlab.com -βœ… Observability - Monitoring on port 8080 -βœ… Remote Development - Available -βœ… Starboard Vulnerability Scanning - Ready -βœ… Leader Election - Active (pod 5n7gq is leader) - -### Logs -- Agent successfully acquired leader lease -- Connected to KAS (Kubernetes Agent Server) -- All modules starting normally -- Observability endpoint up on [::]:8080 - -## πŸ”— GitLab Connection -- **Token**: glagent-emt2cmu7CskiqButPtcGoW86MQpwOjFiYXlwZww.01.1314nquga -- **KAS Address**: wss://kas.gitlab.com -- **Connection Status**: βœ… Connected - -## πŸ“Š Monitoring - -### Check Agent Status -```bash -kubectl get pods -n gitlab-agent-gitlab1 -kubectl logs -n gitlab-agent-gitlab1 -f -kubectl describe pod -n gitlab-agent-gitlab1 -``` - -### Verify Cluster Connection -```bash -kubectl cluster-info -kubectl get nodes -kubectl get namespaces -``` - -### Monitor KAS Tunnel -```bash -kubectl logs -n gitlab-agent-gitlab1 gitlab-agent-v2-7cf65d9858-5n7gq | grep kas -``` - -## πŸš€ What's Now Available - -1. **GitLab CI/CD Integration** - - Deploy TradeHax to cluster from GitLab pipeline - - Auto-scaling based on demand - - GitOps ready - -2. **Kubernetes Management** - - Deploy via GitLab (no kubectl needed) - - Cluster dashboard in GitLab - - Pod logs streamed to GitLab - -3. **Remote Development** - - Workspace pods in cluster - - Terminal access from GitLab - - IDE in browser (beta) - -4. 
**Security Scanning** - - Starboard vulnerability scanning - - Container image scanning - - Network policies - -## πŸ“ Next Steps - -1. **Configure GitLab Project** - - Add `.gitlab-ci.yml` with deployment jobs - - Create environments (dev, staging, production) - - Set up auto-deploy - -2. **Deploy TradeHax to Cluster** - ```yaml - deploy: - stage: deploy - script: - - kubectl apply -f k8s/ - only: - - main - ``` - -3. **Setup Helm Charts** - - Create Helm chart for TradeHax - - Values per environment - - Automated rollbacks - -4. **Monitor & Scale** - - Set up resource limits - - Configure HPA (Horizontal Pod Autoscaler) - - Setup monitoring/alerts - -## πŸ”’ Security -- Token in vault (encrypted in transit) -- KAS uses WSS (secure WebSocket) -- RBAC enabled for pod access -- Network policies can be enforced - -## ✨ Agent Capabilities -- Deploy workloads to cluster -- View pod logs in GitLab UI -- Terminal access to pods -- Helm deployments -- Custom CI/CD runners -- Chaos engineering tests -- Deployment approvals - ---- - -**Deployment Time**: 2026-02-19 06:31:00 UTC -**Helm Chart**: gitlab/gitlab-agent v1.17.0 -**Agent Version**: v2 -**Replicas**: 2 (zero-downtime upgrades enabled) diff --git a/HANDOFF_BUNDLE.md b/HANDOFF_BUNDLE.md deleted file mode 100644 index 44999bfe..00000000 --- a/HANDOFF_BUNDLE.md +++ /dev/null @@ -1,216 +0,0 @@ -πŸ” TRADEHAX.NET β€” HANDOFF BUNDLE (COLD START) -═══════════════════════════════════════════════════════════════════════════════ - -### 1. REPO CONTEXT -Repo URL / owner-repo: DarkModder33/main (GitHub), DarkModder33/main (GitLab mirror) -Target branch: main -Commit directly or PR: Direct commit -Commit style: Conventional commits - -─────────────────────────────────────────────────────────────────────────────── - -### 2. MODIFIED FILES & GIT STATUS - -=== git status --short === -M scripts/fine-tune-mistral-lora.py -M scripts/run-finetune-workflow.js -?? temp_black_format.py (EXCLUDE from commit) - -=== git diff --stat === -scripts/fine-tune-mistral-lora.py | +85 lines (enhanced with 4-bit quant, LoRA config, dataset normalization) -scripts/run-finetune-workflow.js | +65 lines (workflow runner for fine-tuning pipeline) - -─────────────────────────────────────────────────────────────────────────────── - -### 3. NON-SECRET CONFIG VALUES - -HF_MODEL_ID (active): mistralai/Mistral-7B-Instruct-v0.1 -HF_HUB_MODEL_ID (push target): your-org/tradehax-mistral-finetuned -HF_IMAGE_MODEL_ID: stabilityai/stable-diffusion-2-1 -Dataset file path (default): tradehax-training-expanded.jsonl -Output directory: artifacts/fine-tuned-tradehax-mistral - -Training Defaults (Mistral LoRA): - epochs: 3 - batch_size: 2 - gradient_accumulation_steps: 8 - learning_rate: 2e-4 (0.0002) - lora_r: 16 - lora_alpha: 32 - lora_dropout: 0.05 - max_length: 512 - eval_ratio: 0.1 (10% test split) - seed: 42 - target_modules: ["q_proj", "k_proj", "v_proj", "o_proj"] - quantization: 4-bit (nf4) with double quant - -─────────────────────────────────────────────────────────────────────────────── - -### 4. 
VALIDATION OUTPUTS - -npm run lint result: βœ… PASS (no errors) -npm run type-check result: βœ… PASS (hf-server.ts and hf-client.ts type-safe) -First run output: Ready (depends on training data availability) - -Dependencies verified in package.json: - @huggingface/inference: ^4.13.12 βœ… - transformers (Python): >=4.46.0 βœ… - torch: >=2.4.0 βœ… - peft (for LoRA): >=0.13.0 βœ… - bitsandbytes (4-bit): >=0.43.0 βœ… - -─────────────────────────────────────────────────────────────────────────────── - -### 5. COMMIT INSTRUCTIONS - -Commit message (conventional commits): -───────────────────────────────────── -feat(llm): add Mistral LoRA fine-tuning pipeline with 4-bit quantization - -- Implement fine-tune-mistral-lora.py with JSONL dataset normalization -- Add run-finetune-workflow.js for orchestrated training -- Support 3 dataset formats: text, messages, instruction/input/output -- Use PEFT LoRA config (r=16, alpha=32) for parameter efficiency -- Enable 4-bit quantization (nf4) with double quant for VRAM efficiency -- HF_API_TOKEN and model ID configurable via environment -- Supports push-to-hub for Hugging Face Model Card integration - -Training pipeline ready: - npm run llm:finetune -- --dataset - npm run llm:finetune:workflow (with auto-deps install) - npm run llm:finetune:workflow:push (with hub upload) - -Adapted for TradeHax domain with trading/DeFi JSONL samples. - -Squash into one commit: YES -Files to EXCLUDE: temp_black_format.py - -─────────────────────────────────────────────────────────────────────────────── - -### 6. PACKAGE.JSON SCRIPTS ADDED - -New LLM-related npm scripts (already in package.json): - npm run llm:prepare-dataset - Prepare custom training data - npm run llm:validate-dataset - Validate JSONL format - npm run llm:finetune - Run fine-tuning (direct Python) - npm run llm:finetune:push - Fine-tune + push to hub - npm run llm:finetune:deps - Install Python dependencies - npm run llm:finetune:workflow - Orchestrated workflow - npm run llm:finetune:workflow:push - Orchestrated + hub upload - -─────────────────────────────────────────────────────────────────────────────── - -### 7. ENVIRONMENT SETUP (NON-SECRETS) - -.env.local current values (secrets redacted, showing structure only): - HF_API_TOKEN=hf_*** (KEEP PRIVATE) - HF_MODEL_ID=mistralai/Mistral-7B-Instruct-v0.1 - HF_IMAGE_MODEL_ID=stabilityai/stable-diffusion-2-1 - VERCEL_OIDC_TOKEN=*** (KEEP PRIVATE) - DISCORD_BOT_TOKEN=*** (KEEP PRIVATE) - Social API keys=*** (KEEP PRIVATE) - -To use fine-tuning locally: - 1. Ensure HF_API_TOKEN is set in shell or .env.local - 2. Run: npm run llm:finetune:workflow - 3. Or: npm run llm:finetune:workflow:push (to upload to Hugging Face) - -─────────────────────────────────────────────────────────────────────────────── - -### 8. FILES TO TRACK IN NEXT COMMIT - -Essential files (no secrets): - βœ… scripts/fine-tune-mistral-lora.py - βœ… scripts/fine-tune-requirements.txt - βœ… scripts/run-finetune-workflow.js - βœ… lib/ai/hf-client.ts (already committed) - βœ… lib/ai/hf-server.ts (already committed) - βœ… package.json (already updated with scripts) - -Skip: - ❌ .env.local (secrets, gitignored, local only) - ❌ temp_black_format.py (temporary, not tracked) - ❌ artifacts/* (build output, gitignored) - ❌ .next/* (build cache, gitignored) - -─────────────────────────────────────────────────────────────────────────────── - -### 9. WORKFLOW VALIDATION BEFORE PUSH - -Run these locally to verify everything works: - - 1. 
npm run type-check - Expected: βœ… No errors (TypeScript validates hf-client & hf-server) - - 2. npm run lint - Expected: βœ… ESLint pass (all .ts files lint clean) - - 3. npm run llm:finetune:deps - Expected: βœ… Python packages installed (transformers, torch, peft, bitsandbytes) - - 4. python ./scripts/fine-tune-mistral-lora.py --help - Expected: βœ… Shows argparse help with all options - - 5. node ./scripts/run-finetune-workflow.js --help (optional, no help yet) - Expected: βœ… Script runs without error - -βœ… All validations should pass before pushing. - -─────────────────────────────────────────────────────────────────────────────── - -### 10. POST-COMMIT STEPS - -After commit is merged to main: - - 1. Update CI/CD to include fine-tuning in pipeline (if desired): - - Add HF_API_TOKEN to GitHub Secrets (KEEP PRIVATE) - - Add training workflow trigger (manual or scheduled) - - 2. Document in repo: - - Fine-tuning guide (README section) - - Example JSONL format - - Hub model card - - 3. Test end-to-end: - - Push fine-tuned adapter to Hugging Face - - Verify it loads with PEFT + base model - -─────────────────────────────────────────────────────────────────────────────── - -### 11. COLD START RESUME CHECKLIST - -If resuming later with fresh checkout: - - [ ] Clone: git clone https://github.com/DarkModder33/main.git - [ ] Branch: git checkout main - [ ] Install Node deps: npm install - [ ] Install Python deps: npm run llm:finetune:deps - [ ] Set HF_API_TOKEN in shell or .env.local - [ ] Prepare dataset: npm run llm:prepare-dataset - [ ] Run fine-tuning: npm run llm:finetune:workflow [--push] - [ ] All logs should show: "Training complete." - -─────────────────────────────────────────────────────────────────────────────── - -### HANDOFF SUMMARY - -βœ… Files ready: - - fine-tune-mistral-lora.py (production-grade, 4-bit + LoRA) - - run-finetune-workflow.js (npm-integrated orchestration) - - fine-tune-requirements.txt (pinned versions) - - hf-client.ts (HF Inference API) - - hf-server.ts (server-side config & caching) - -βœ… Config ready: - - Environment variables documented - - Training hyperparameters tuned for Mistral 7B - - Dataset normalization supports 3 JSONL formats - -βœ… Scripts ready: - - npm run llm:finetune (direct) - - npm run llm:finetune:push (with hub upload) - - npm run llm:finetune:workflow (orchestrated, deps auto-install) - -βœ… Ready to commit and deploy to production. - -═══════════════════════════════════════════════════════════════════════════════ diff --git a/HARD_LAUNCH_RUNBOOK.md b/HARD_LAUNCH_RUNBOOK.md deleted file mode 100644 index 55ececb3..00000000 --- a/HARD_LAUNCH_RUNBOOK.md +++ /dev/null @@ -1,54 +0,0 @@ -# TradeHax Hard Launch Runbook - -This runbook covers the production launch path for monetization and deployment. 
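Before working through the checklist, it helps to confirm the required variables are actually present in the target environment. The snippet below is an illustrative sketch, not a script that ships with the repo; the variable names come from the list in the next section.

```bash
# Illustrative pre-launch check: fail fast if any required production
# env var from the checklist below is missing or empty.
for name in NEXTAUTH_URL NEXTAUTH_SECRET TRADEHAX_WEBHOOK_SECRET TRADEHAX_ADMIN_KEY; do
  if [ -z "${!name}" ]; then
    echo "Missing required env var: $name" >&2
    exit 1
  fi
done
echo "All required launch env vars are present."
```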
- -## 1) Pre-Launch Environment Setup - -Set these variables in Vercel (Production environment): - -- `NEXTAUTH_URL` -- `NEXTAUTH_SECRET` (32+ chars) -- `TRADEHAX_STRIPE_CHECKOUT_URL` and/or tier-specific variants -- `TRADEHAX_COINBASE_CHECKOUT_URL` and/or tier-specific variants -- `TRADEHAX_WEBHOOK_SECRET` -- `TRADEHAX_ADMIN_KEY` -- `TRADEHAX_ALLOW_PAYMENT_SIMULATION=false` - -Optional provider-specific overrides: - -- `TRADEHAX_STRIPE_CHECKOUT_URL_BASIC` -- `TRADEHAX_STRIPE_CHECKOUT_URL_PRO` -- `TRADEHAX_STRIPE_CHECKOUT_URL_ELITE` -- `TRADEHAX_COINBASE_CHECKOUT_URL_BASIC` -- `TRADEHAX_COINBASE_CHECKOUT_URL_PRO` -- `TRADEHAX_COINBASE_CHECKOUT_URL_ELITE` - -## 2) Billing and Entitlement Surfaces - -- Customer billing page: `/billing` -- Pricing page (launch tiers): `/pricing` -- Monetization admin metrics page: `/admin/monetization` -- Stripe webhook endpoint: `/api/monetization/webhooks/stripe` - -## 3) Launch Validation Checklist - -1. Open `/billing`, verify plans render and subscription snapshot loads. -2. Verify checkout POST `/api/monetization/checkout` returns configured URL in production. -3. Trigger webhook test payload against `/api/monetization/webhooks/stripe`. -4. Verify AI endpoint usage gating: - - `/api/ai/chat` returns usage metadata. - - Daily limits block with `USAGE_LIMIT_REACHED`. -5. Verify admin metrics endpoint: - - `/api/monetization/admin/metrics` responds with valid `x-tradehax-admin-key`. - -## 4) Deployment Contract - -- CI pipeline command: `npm run pipeline:ci` -- Production branch: `main` -- Host: `tradehax.net` (primary domain) - -## 5) Rollback - -1. Re-deploy previous successful Vercel production build. -2. If needed, revert commit on `main` and push. -3. Re-run webhook tests and `/billing` checks after rollback. diff --git a/HF_DATASET_UPLOAD.md b/HF_DATASET_UPLOAD.md deleted file mode 100644 index 2685b58c..00000000 --- a/HF_DATASET_UPLOAD.md +++ /dev/null @@ -1,153 +0,0 @@ -# Hugging Face Dataset Upload Instructions - -## βœ… Your Training Data is Ready - -File: `ai-training-set.jsonl` (5.73 KB) -Format: JSONL (JSON Lines) - 26 instruction-response pairs - -### Step 1: Create Dataset Repository - -1. Go to: https://huggingface.co/new-dataset -2. Fill in: - - **Name**: `tradehax-behavioral` - - **Description**: `TradeHax AI behavioral training dataset - Q&A pairs for fine-tuning language models` - - **License**: `openrail` (or your preferred license) - - **Private**: Leave unchecked (public) -3. Click "Create dataset" - -### Step 2: Upload Training File - -After creating the dataset: - -1. Go to: https://huggingface.co/datasets/DarkModder33/tradehax-behavioral -2. Click "Upload files" or go to the Files tab -3. Drag & drop or select `ai-training-set.jsonl` -4. 
Click "Upload" - -### Step 3: Verify Upload - -You should see your file in the dataset with: -- βœ… File size: ~5.7 KB -- βœ… Format: JSONL (properly recognized) -- βœ… Preview: Shows the JSON structure - ---- - -## πŸ“Š Training Data Contents - -26 Q&A pairs covering: -- What is TradeHax -- Getting started -- Blockchain integration -- Token rewards (THX) -- Prediction accuracy -- Trading pairs -- Security features -- Wallet connection -- Hyperborea game -- NFT skins -- Community features -- API & automation -- Technical details -- Support & troubleshooting - -### Sample Entry: -```json -{"instruction": "What is TradeHax?", "response": "TradeHax is an AI-powered trading analysis platform that combines blockchain technology with advanced machine learning..."} -``` - ---- - -## πŸš€ Next Steps - -After uploading to Hugging Face, you can: - -### 1. Fine-tune a Model -```bash -# Using Hugging Face Trainer -python -m transformers.training --model_name mistral-7b \ - --dataset_name DarkModder33/tradehax-behavioral -``` - -### 2. Use for Few-Shot Learning -```python -from datasets import load_dataset - -dataset = load_dataset("DarkModder33/tradehax-behavioral") -for example in dataset['train']: - print(f"Q: {example['instruction']}") - print(f"A: {example['response']}\n") -``` - -### 3. Integrate with Your LLM -```typescript -// In your HF inference code -const dataset = await loadDataset("DarkModder33/tradehax-behavioral"); -// Use for context injection or fine-tuning -``` - -### 4. Expand the Dataset -Add more training examples to improve model accuracy: -- Technical documentation -- Common user issues -- Trading strategies -- Community FAQs - ---- - -## πŸ“ Dataset Format - -JSONL format (one JSON object per line): - -``` -{"instruction": "...", "response": "..."} -{"instruction": "...", "response": "..."} -... -``` - -### Fields: -- **instruction**: User query or question -- **response**: Model output or answer - -This format is compatible with: -- Hugging Face Transformers -- LLaMA fine-tuning -- OpenAI fine-tuning -- Other popular frameworks - ---- - -## πŸ”— Quick Links - -- **Create Dataset**: https://huggingface.co/new-dataset -- **Your Dataset** (after creation): https://huggingface.co/datasets/DarkModder33/tradehax-behavioral -- **Upload Files**: https://huggingface.co/datasets/DarkModder33/tradehax-behavioral/upload/main -- **Hugging Face Hub**: https://huggingface.co - ---- - -## πŸ’‘ Tips - -1. **Keep updating**: Add more Q&A pairs over time for better model performance -2. **Version control**: Use Git LFS for large datasets -3. **Documentation**: Add a README.md to your dataset repo -4. **License**: Clearly specify your license for legal clarity -5. **Community**: Make it public to let others use/cite your data - ---- - -## ✨ Your Training File is Ready to Upload - -The file `ai-training-set.jsonl` contains everything needed. Just upload it to your new Hugging Face dataset! - -Run this command after creating the dataset (if you have curl): - -```bash -$env:HF_TOKEN="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" -curl -X POST ` - -H "Authorization: Bearer $env:HF_TOKEN" ` - -F "file=@ai-training-set.jsonl" ` - "https://huggingface.co/api/datasets/DarkModder33/tradehax-behavioral/upload/main/ai-training-set.jsonl" -``` - -Or use the web interface (recommended) at the links above! 
diff --git a/HF_FINE_TUNING_WORKFLOW.md b/HF_FINE_TUNING_WORKFLOW.md deleted file mode 100644 index b4c75e9b..00000000 --- a/HF_FINE_TUNING_WORKFLOW.md +++ /dev/null @@ -1,104 +0,0 @@ -# Hugging Face Fine-Tuning Workflow for TradeHax - -## Overview - -This workflow is production-oriented: - -- Train with a real Mistral base model. -- Push adapters/model artifacts to Hugging Face Hub. -- Switch runtime inference to your fine-tuned model ID. - -## 1) Push latest repo state - -1. Ensure local lint/type-check pass. -2. Push branch: - - `git push origin main` - -## 2) Configure real training in `.env` - -Use these defaults for real runs: - -- `HF_MODEL_ID=mistralai/Mistral-7B-Instruct-v0.1` -- `HF_HUB_MODEL_ID=your-org/tradehax-mistral-finetuned` -- `DATASET_PATH=tradehax-training-expanded.jsonl` -- `TRAIN_EPOCHS=3` -- `TRAIN_BATCH_SIZE=4` -- `TRAIN_LR=2e-5` -- `LORA_R=16` -- `LORA_ALPHA=32` -- `LOAD_4BIT=True` -- `USE_CUDA=True` (if GPU available) -- `CLEAN_CHECKPOINTS=True` (for clean reruns) - -If GPU is unavailable or CPU-only is too slow: - -- Use **Hugging Face AutoTrain** for managed training, or -- Run in **Google Colab** (T4/A100 runtime) with this repo script + dataset upload. - -## 3) Run fine-tune - -Before training, refresh live market deltas into `data/external-datasets`: - -- Dry-run ingestion: - - `npm run llm:ingest-live-deltas:dry-run` -- Write live delta rows: - - `npm run llm:ingest-live-deltas` -- Full continuous refresh (ingest + prepare + validate): - - `npm run llm:continuous-refresh` - -Optional env vars for ingestion: - -- `TRADEHAX_LIVE_SYMBOLS=BTC,ETH,SOL,SPY,QQQ,TSLA,NVDA` -- `FINNHUB_API_KEY=...` (enables live news deltas) -- `TRADEHAX_LIVE_MAX_SYMBOLS=12` -- `TRADEHAX_LIVE_INGEST_RETRIES=2` -- `TRADEHAX_LIVE_INGEST_TIMEOUT_MS=9000` - -Install dependencies (once): - -- `pip install -r scripts/fine-tune-requirements.txt` - -Run training: - -- `npm run fine-tune` - -The workflow script supports dependency bootstrap (`--install-deps`) via `run-finetune-workflow.js` when needed. - -## 4) Push to Hub and switch inference model - -After successful `push_to_hub`: - -1. In Vercel project environment variables, set: - - `HF_MODEL_ID=your-org/tradehax-mistral-finetuned` -2. Redeploy. - -## 5) Test inference - -Validate server route: - -- `POST /api/hf-server` -- Body example: - - `{ "prompt": "Give me a concise BTC/ETH market brief.", "task": "text-generation" }` - -Expected result: - -- `200` with `{ output: ... }` - -If failures occur: - -- Confirm `HF_API_TOKEN` and `HF_MODEL_ID` in Vercel. -- Confirm model visibility/permissions on HF Hub. -- Check Vercel function logs for HF inference errors. - -## 6) Monetization enablement for premium AI - -To expose premium AI subscriptions: - -- Enable `NEXT_PUBLIC_ENABLE_PAYMENTS=true` -- Configure billing envs (Stripe/Coinbase/etc.) used by monetization routes. -- Verify billing screens and `/api/monetization/*` endpoints in staging before prod cutover. - -## Notes - -- Tiny-model tests are useful for plumbing verification only. -- Real quality targets require Mistral + full dataset + stable HF Hub artifact. diff --git a/HF_INTEGRATION_GUIDE.md b/HF_INTEGRATION_GUIDE.md deleted file mode 100644 index 91829331..00000000 --- a/HF_INTEGRATION_GUIDE.md +++ /dev/null @@ -1,255 +0,0 @@ -# Quick Integration Guide: HF Fine-Tuning in TradeHax - -## πŸš€ 5-Minute Setup - -### 1. Clone the repo (if needed) -```bash -git clone https://github.com/DarkModder33/main.git -cd main -``` - -### 2. 
Setup environment -```bash -cp .env.example .env.local -``` - -Then edit `.env.local` and set: -```bash -HF_API_TOKEN=hf_YOUR_TOKEN_HERE # Get from https://huggingface.co/settings/tokens -``` - -### 3. Install dependencies -```bash -npm install -npm run llm:finetune:deps -``` - -### 4. Run fine-tuning (optional, requires training data) -```bash -npm run llm:finetune:workflow:push -``` - -This will: -- Load training data from `data/custom-llm/tradehax-training-expanded.jsonl` -- Fine-tune Mistral-7B with LoRA -- Push model to Hugging Face Hub - -### 5. Start development server -```bash -npm run dev -``` - -Test the API: -```bash -curl -X POST http://localhost:3000/api/hf-server \ - -H "Content-Type: application/json" \ - -d '{"prompt": "Generate a trading strategy", "task": "text-generation"}' -``` - ---- - -## πŸ“ Using the HF Client in React Components - -```typescript -import { useHfClient } from '@/components/hf-client'; - -export function TradeAnalyzer() { - const { callHfApi, loading, error } = useHfClient(); - const [result, setResult] = useState(null); - - const analyze = async () => { - try { - const output = await callHfApi( - 'Analyze BTC/USD technical indicators for buy signals', - 'text-generation', - { max_length: 512, temperature: 0.7 } - ); - setResult(output); - } catch (err) { - console.error('Analysis failed:', err); - } - }; - - return ( -
    <div>
      <button onClick={analyze} disabled={loading}>
        {loading ? 'Analyzing...' : 'Analyze'}
      </button>
      {error && <div className="error">{error}</div>}
      {result && <pre>{JSON.stringify(result, null, 2)}</pre>}
    </div>
- ); -} -``` - ---- - -## πŸ–ΌοΈ Image Generation Example - -```typescript -const imageOutput = await callHfApi( - 'Trading chart with bull flag pattern, financial dashboard style', - 'image-generation', - { - steps: 30, - guidance_scale: 7.5, - negative_prompt: 'low quality, blurry, distorted' - } -); - -// imageOutput is a Blob (image data) -const imageUrl = URL.createObjectURL(imageOutput); -``` - ---- - -## πŸ”§ API Endpoint Reference - -### POST `/api/hf-server` - -**Request Body:** -```json -{ - "prompt": "Your text or image prompt", - "task": "text-generation|image-generation", - "parameters": { - "max_length": 768, - "temperature": 0.85, - "top_p": 0.95, - "steps": 30, - "guidance_scale": 6.5 - } -} -``` - -**Response (Text):** -```json -{ - "output": [ - { - "generated_text": "Your generated trading strategy..." - } - ] -} -``` - -**Response (Image):** -```json -{ - "output": -} -``` - ---- - -## πŸ“Š Environment Variables - -Copy to `.env.local`: - -```bash -# Hugging Face API Token (REQUIRED - keep secret!) -HF_API_TOKEN=hf_YOUR_TOKEN_HERE - -# Text Generation Model -HF_MODEL_ID=mistralai/Mistral-7B-Instruct-v0.1 -LLM_MAX_LENGTH=768 -LLM_TEMPERATURE=0.85 -LLM_TOP_P=0.95 - -# Image Generation Model -HF_IMAGE_MODEL_ID=stabilityai/stable-diffusion-2-1 -HF_IMAGE_STEPS=30 -HF_IMAGE_GUIDANCE_SCALE=6.5 -HF_IMAGE_NEGATIVE_PROMPT_DEFAULT=low quality, blurry - -# Fine-Tuning Config -HF_HUB_MODEL_ID=your-org/tradehax-mistral-finetuned -DATASET_PATH=data/custom-llm/tradehax-training-expanded.jsonl -TRAIN_EPOCHS=3 -TRAIN_BATCH_SIZE=4 -TRAIN_LR=2e-5 -LORA_R=16 -LORA_ALPHA=32 -``` - ---- - -## πŸ§ͺ Testing Checklist - -- [ ] `.env.local` created with valid `HF_API_TOKEN` -- [ ] `npm install` completed -- [ ] `npm run llm:finetune:deps` completed -- [ ] `npm run type-check` passes -- [ ] `npm run lint` passes -- [ ] `npm run dev` starts without errors -- [ ] API endpoint responds: `curl -X POST http://localhost:3000/api/hf-server ...` -- [ ] React component successfully calls `useHfClient()` - ---- - -## πŸš€ Production Deployment (Vercel) - -1. Add environment variables to Vercel dashboard: - - `HF_API_TOKEN` (keep marked as secret) - - All other `HF_*` variables - -2. Push to main: - ```bash - git add . - git commit -m "feat: configure HF fine-tuning for production" - git push origin main - ``` - -3. Vercel auto-deploys. 
Test live endpoint: - ```bash - curl -X POST https://tradehax.net/api/hf-server \ - -H "Content-Type: application/json" \ - -d '{"prompt": "Trading signal", "task": "text-generation"}' - ``` - ---- - -## πŸ’° Monetization Opportunities - -### Premium AI Queries -- Charge per API call for advanced LLM outputs -- Implement rate limiting + billing system -- Examples: custom trading analysis, market predictions - -### Fine-Tuned Model Access -- Offer API access to fine-tuned Mistral model -- Subscription tiers (starter, pro, enterprise) - -### Training Data Services -- Collect high-quality trading data -- Fine-tune models for other traders/platforms -- Sell model access via Hub - -### Contact for Consultations -**Email:** darkmodder33@proton.me - ---- - -## πŸ“š Full Documentation - -- `docs/HF_FINE_TUNING_WORKFLOW.md` β€” Complete workflow guide -- `SETUP_VERIFICATION.md` β€” Detailed setup checklist -- `scripts/fine-tune-mistral-lora.py` β€” Training implementation - ---- - -## πŸ†˜ Troubleshooting - -| Error | Solution | -|-------|----------| -| `HF_API_TOKEN is not defined` | Add to `.env.local`: `HF_API_TOKEN=hf_...` | -| `Module not found: @huggingface/inference` | Run: `npm install @huggingface/inference` | -| `Python module not found` | Run: `npm run llm:finetune:deps` | -| `CUDA out of memory` | Fine-tuning uses 4-bit quantization, should work on 8GB+ GPU | -| `API returns 500` | Check server logs: `npm run dev` and inspect `/api/hf-server` | - ---- - -**Status:** βœ… Ready to integrate -**Last Updated:** 2026-02-24 -**Commit:** 29b8ee9 diff --git a/HF_SETUP_GUIDE.md b/HF_SETUP_GUIDE.md deleted file mode 100644 index baa0a622..00000000 --- a/HF_SETUP_GUIDE.md +++ /dev/null @@ -1,349 +0,0 @@ -# Hugging Face LLM Integration Guide - -## Quick Setup - -### 1. Get Your API Token - -1. Go to [huggingface.co/settings/tokens](https://huggingface.co/settings/tokens) -2. Click "New token" -3. Set name: `tradehax-ai` -4. Set role: `read` (read-only access) -5. Copy the token - -### 2. Configure Environment - -Create or edit `.env.local`: - -```bash -# Hugging Face Configuration -HF_API_TOKEN=hf_your_token_here - -# Choose a model -HF_MODEL_ID=mistralai/Mistral-7B-Instruct-v0.1 -HF_USE_LOCAL_MODEL=false - -# Optional settings -LLM_TEMPERATURE=0.7 -LLM_MAX_LENGTH=512 -LLM_TOP_P=0.95 -``` - -### 3. Start Development Server - -```bash -npm run dev -``` - -### 4. Test the API - -Go to: **http://localhost:3000/ai** - -Or test via curl: - -```bash -curl -X POST http://localhost:3000/api/ai/generate \ - -H "Content-Type: application/json" \ - -d '{"prompt": "Hello, how are you?"}' -``` - ---- - -## Available Models - -### Free/Fast (No Inference Fee) - -- **distilgpt2** - Fastest, ~82M parameters -- **gpt2** - Fast, ~124M parameters -- **distilbert-base-uncased** - BERT variant - -### Recommended (Low Cost) - -- **mistralai/Mistral-7B-Instruct-v0.1** - Excellent balance, ~7B params (default) -- **microsoft/phi-2** - Fast and capable, ~2.7B params - -### Best Quality (Higher Cost/Latency) - -- **meta-llama/Llama-2-7b** - Powerful open-source, ~7B params -- **tiiuae/falcon-7b** - Fast, high-quality, ~7B params -- **meta-llama/Llama-2-13b** - Even more capable, ~13B params - -### Specialized Models - -- **Salesforce/codet5-large** - Code generation and understanding -- **facebook/bart-large-cnn** - Text summarization -- **Helsinki-NLP/opus-mt-en-es** - English to Spanish translation - ---- - -## API Endpoints - -### POST /api/ai/generate - -Generate text from a prompt. 
- -**Request:** -```json -{ - "prompt": "Write a poem about AI", - "maxTokens": 256, - "temperature": 0.8 -} -``` - -**Response:** -```json -{ - "ok": true, - "text": "...", - "model": "mistralai/Mistral-7B-Instruct-v0.1", - "tokensUsed": 42 -} -``` - -### POST /api/ai/chat - -Chat with conversation history. - -**Request:** -```json -{ - "messages": [ - { "role": "user", "content": "What is AI?" }, - { "role": "assistant", "content": "AI is..." }, - { "role": "user", "content": "Tell me more." } - ], - "systemPrompt": "You are a helpful AI assistant" -} -``` - -**Response:** -```json -{ - "ok": true, - "message": { - "role": "assistant", - "content": "..." - }, - "model": "mistralai/Mistral-7B-Instruct-v0.1" -} -``` - -### POST /api/ai/summarize - -Summarize long text. - -**Request:** -```json -{ - "text": "Lorem ipsum dolor sit amet...", - "maxLength": 150 -} -``` - -**Response:** -```json -{ - "ok": true, - "summary": "...", - "model": "mistralai/Mistral-7B-Instruct-v0.1" -} -``` - -### POST /api/ai/stream - -Stream text generation using Server-Sent Events (SSE). - -**Request:** -```bash -curl -N http://localhost:3000/api/ai/stream \ - -X POST \ - -H "Content-Type: application/json" \ - -d '{"prompt": "Hello"}' -``` - -**Response (streaming):** -``` -data: {"type":"start"} -data: {"type":"token","text":" world"} -data: {"type":"token","text":"!"} -data: {"type":"end"} -``` - ---- - -## Using in React Components - -### Chat Component - -```tsx -import { HFChatComponent } from "@/components/ai/HFChatComponent"; - -export default function MyPage() { - return ; -} -``` - -### Generator Component - -```tsx -import { HFGeneratorComponent } from "@/components/ai/HFGeneratorComponent"; - -export default function MyPage() { - return ; -} -``` - -### Custom Hook - -```tsx -import { useHFChat } from "@/hooks/useHFChat"; - -export function MyComponent() { - const { messages, sendMessage, loading } = useHFChat(); - - return ( - <> - {messages.map(m =>
        <div key={m.content}>{m.content}</div>
)} - - - ); -} -``` - ---- - -## Advanced: Custom Prompts - -### Prompt Structuring - -Use system prompts to guide behavior: - -```typescript -const messages = [ - { - role: "system", - content: "You are a helpful Python programming assistant." - }, - { - role: "user", - content: "How do I read a JSON file?" - } -]; - -await fetch("/api/ai/chat", { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ messages }) -}); -``` - -### Few-Shot Examples - -```typescript -const messages = [ - { role: "user", content: "Translate to Spanish: Hello" }, - { role: "assistant", content: "Hola" }, - { role: "user", content: "Translate to Spanish: Good morning" }, - { role: "assistant", content: "Buenos dΓ­as" }, - { role: "user", content: "Translate to Spanish: How are you?" } -]; -``` - -### Chain-of-Thought - -```typescript -const prompt = ` -Question: If there are 5 apples and you eat 2, how many are left? - -Let's think step by step: -1. Start with 5 apples -2. You eat 2 apples -3. 5 - 2 = ? - -Answer: -`; -``` - ---- - -## Troubleshooting - -### "HF_API_TOKEN not set" - -- Add `HF_API_TOKEN=hf_your_token` to `.env.local` -- Restart dev server (`npm run dev`) -- Check that your token is valid at [huggingface.co/settings/tokens](https://huggingface.co/settings/tokens) - -### "Model not available" - -- Model might be loading (first request is slow) -- Check model exists: https://huggingface.co/models -- Try a smaller model like `distilgpt2` -- Verify internet connection - -### Slow Responses - -- First request loads model (30-60 seconds) -- Subsequent requests are faster -- Try smaller model: `distilgpt2` or `gpt2` -- Increase timeout if needed - -### Memory Issues - -- Run on machine with at least 4GB RAM -- Use smaller models: `distilgpt2` (~300MB), `gpt2` (~500MB) -- Avoid running other large applications - ---- - -## Local Model Setup (Optional) - -To run models locally without API calls: - -```bash -# Install transformers for CPU/GPU -npm install transformers onnxruntime-web - -# Update .env.local -HF_USE_LOCAL_MODEL=true -HF_LOCAL_MODEL_NAME=gpt2 -``` - ---- - -## Resources - -- [Hugging Face Hub](https://huggingface.co) -- [Inference API Docs](https://huggingface.co/docs/api-inference) -- [@huggingface/inference Docs](https://www.npmjs.com/package/@huggingface/inference) -- [Transformers.js](https://github.com/xenova/transformers.js) - ---- - -## Next Steps - -1. βœ… Set up API token -2. βœ… Configure .env.local -3. βœ… Test /api/ai endpoints -4. βœ… Use HFChatComponent or HFGeneratorComponent -5. Build custom AI features for your app - ---- - -## Security Notes - -- **Never commit** `HF_API_TOKEN` to git -- **Use `.env.local`** for local development (gitignored) -- **Use GitHub Secrets** for production (Vercel) -- API calls are sent to Hugging Face servers -- Consider rate limits for production use - ---- - -## Billing - -- **Free tier**: Limited requests (enough for testing) -- **Pro tier** ($9/month): Higher rate limits -- **Enterprise**: Custom solutions - -Check your usage: https://huggingface.co/billing/overview diff --git a/IDE_AUTOMATION_WORKFLOW.md b/IDE_AUTOMATION_WORKFLOW.md deleted file mode 100644 index 3d381df3..00000000 --- a/IDE_AUTOMATION_WORKFLOW.md +++ /dev/null @@ -1,53 +0,0 @@ -# TradeHax IDE + Pipeline Workflow - -This workspace now uses one explicit local-to-CI flow so daily work feels predictable for AI-assisted development. 
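In practice, the daily loop looks like the sketch below. The commands come from the script contract in section 3; treat this as an outline rather than a required procedure.

```bash
# Day-to-day loop against the script contract defined below.
npm run pipeline:quality    # lint + TypeScript checks while iterating
npm run pipeline:local      # local quality gate + production build before pushing
git push origin main        # CI then runs the same contract via `npm run pipeline:ci`
```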
- -## 1) Pick One Active Local Repo - -Canonical/mirror model: -- `C:\tradez\main` -- `C:\DarkModder33\main` - -Use `C:\tradez\main` as canonical for active coding and keep `C:\DarkModder33\main` as mirror-only. - -## 2) Local IDE Loop - -Use `tradehaxai.code-workspace` and these VS Code tasks: -- `TradeHax: Dev Server` -- `TradeHax: Lint` -- `TradeHax: Type Check` -- `TradeHax: Local Pipeline` -- `TradeHax: Deploy Preflight` -- `TradeHax: Repo Status` -- `TradeHax: Sync Mirror` -- `TradeHax: Extension Watch` - -`TradeHax: Local Pipeline` is the default build task and runs: -- `npm run pipeline:local` - -## 3) Script Contract - -- `npm run pipeline:quality` - - Lint + TypeScript checks -- `npm run pipeline:local` - - Local quality gate + production build -- `npm run pipeline:deploy-checks` - - DNS + Vercel validation scripts -- `npm run pipeline:ci` - - Clean + quality + production build - - Runs deploy checks automatically when `bash` is available (always in Linux CI) - -## 4) CI Contract - -`.github/workflows/build-check.yml` now runs: -- `npm run pipeline:ci` - -That keeps CI behavior aligned with the same command contract used locally. - -## 5) Debug Flow - -Use launch profiles: -- `TradeHax: Next.js Full Stack` -- `TradeHax: Browser Debug` -- `TradeHax: VS Code Extension` -- `TradeHax: App + Extension` (compound) diff --git a/IDE_PIPELINE_READY.md b/IDE_PIPELINE_READY.md deleted file mode 100644 index 961a2c17..00000000 --- a/IDE_PIPELINE_READY.md +++ /dev/null @@ -1,425 +0,0 @@ -╔════════════════════════════════════════════════════════════════════════════╗ -β•‘ β•‘ -β•‘ βœ… IDE PIPELINE & MULTI-LOCATION SYNC WORKFLOW READY β•‘ -β•‘ β•‘ -β•‘ Complete Development Consistency Across Machines β•‘ -β•‘ β•‘ -β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - - -🎯 WHAT'S BEEN IMPLEMENTED - -═════════════════════════════════════════════════════════════════════════════ - -βœ… THREE-TIER IDE SYNC WORKFLOW - -1. Quick Sync (30 seconds) - npm run ide:sync - β€’ Git fetch + status report - β€’ Git hooks install - β€’ ESLint check - β€’ TypeScript check - -2. Full Sync (2 minutes) - npm run ide:sync:full - β€’ Quick sync + - β€’ npm ci (clean install) - β€’ Production build - β€’ Namecheap deploy config check (warning mode) - -3. 
Deploy-Ready Strict Sync (2 minutes) - npm run ide:sync:deploy-ready - β€’ Full sync + - β€’ STRICT Namecheap secret verification - β€’ Final git status check - -βœ… VS CODE INTEGRATION - -8 integrated tasks available via Command Palette (Ctrl+Shift+P): - β€’ TradeHax: IDE Sync (Quick) - β€’ TradeHax: IDE Sync (Full) - β€’ TradeHax: IDE Sync (Deploy Ready) - β€’ TradeHax: Lint - β€’ TradeHax: Type Check - β€’ TradeHax: Build - β€’ TradeHax: Dev Server - β€’ TradeHax: Deploy to Namecheap - -βœ… MULTI-LOCATION BEST PRACTICES - -Documented workflow for: - β€’ Opening workspace on any machine - β€’ Syncing with origin/main - β€’ Pre-push validation - β€’ Pre-deployment verification - β€’ Secret management - -βœ… NAMECHEAP DEPLOYMENT INTEGRATION - -Automated checks for: - β€’ Required secrets (VPS host, user, SSH key) - β€’ Optional config (port, app root, app port) - β€’ Strict vs warning mode - β€’ GitHub Actions integration - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸ“ FILES CREATED - -═════════════════════════════════════════════════════════════════════════════ - -Implementation: - βœ… scripts/ide-sync-workflow.js (1,000+ LOC) - - Complete workflow orchestration - - Multi-stage sync logic - - Namecheap config validation - - Color-coded output - - Error handling & next steps - -Configuration: - βœ… .vscode/tasks.json (updated) - - 8 integrated VS Code tasks - - Problem matchers for errors - - Background task for dev server - - Customizable panel behavior - -Documentation: - βœ… IDE_PIPELINE_WORKFLOW.md (11 KB) - - Complete user guide - - Command reference - - Best practices - - Examples & troubleshooting - - Secret management guide - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸš€ QUICK START - -═════════════════════════════════════════════════════════════════════════════ - -Terminal Usage: - -# Morning startup (any machine) -npm run ide:sync - -# Before pushing code -npm run ide:sync:full - -# Before Namecheap deployment -npm run ide:sync:deploy-ready - - -VS Code Usage: - -1. Open Command Palette: Ctrl+Shift+P (Windows/Linux) or Cmd+Shift+P (Mac) -2. Type "Run Task" -3. Select desired task: - - "TradeHax: IDE Sync (Quick)" - - "TradeHax: IDE Sync (Full)" - - "TradeHax: IDE Sync (Deploy Ready)" - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸ”‘ NAMECHEAP SECRETS SETUP - -═════════════════════════════════════════════════════════════════════════════ - -REQUIRED (GitHub Secrets): - NAMECHEAP_VPS_HOST=199.188.201.164 - NAMECHEAP_VPS_USER=traddhou - NAMECHEAP_VPS_SSH_KEY=(your private SSH key) - -OPTIONAL (GitHub Secrets): - NAMECHEAP_VPS_PORT=22 - NAMECHEAP_APP_ROOT=/home/traddhou/public_html - NAMECHEAP_APP_PORT=3000 - -LOCAL DEVELOPMENT (.env): - cp .env.example .env - # Add secrets to .env (never commit) - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸ“Š WORKFLOW MODES - -═════════════════════════════════════════════════════════════════════════════ - -QUICK SYNC (npm run ide:sync) -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ 1. Git Fetch β”‚ -β”‚ 2. Git Status Report β”‚ -β”‚ 3. Git Hooks Install β”‚ -β”‚ 4. Lint Check β”‚ -β”‚ 5. 
Type Check β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -Time: ~30 seconds -Status: βœ… Ready to edit -Use: When opening workspace, daily sync - -FULL SYNC (npm run ide:sync:full) -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ 1-5. Quick Sync β”‚ -β”‚ 6. npm ci (clean install) β”‚ -β”‚ 7. npm run build β”‚ -β”‚ 8. Namecheap config check (warn) β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -Time: ~2 minutes -Status: βœ… Ready to push -Use: Before pushing to GitHub - -DEPLOY-READY STRICT SYNC (npm run ide:sync:deploy-ready) -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ 1-8. Full Sync β”‚ -β”‚ 9. STRICT Namecheap secret verify β”‚ -β”‚ 10. Final git status β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -Time: ~2 minutes -Status: βœ… Ready to deploy OR ❌ Fails if secrets missing -Use: Before Namecheap deployment - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸ“‹ MULTI-MACHINE BEST PRACTICES - -═════════════════════════════════════════════════════════════════════════════ - -OPENING ON NEW MACHINE: - 1. Clone repo: git clone https://github.com/DarkModder33/main.git - 2. Open in VS Code - 3. Run: npm run ide:sync (quick check) - 4. Start: npm run dev - -DURING DEVELOPMENT: - 1. Keep local changes small - 2. Commit frequently - 3. Before pushing: npm run ide:sync:full - 4. If changes elsewhere: git pull origin main - 5. Continue coding - -BEFORE DEPLOYMENT: - 1. Ensure all code pushed: git push origin main - 2. Run: npm run ide:sync:deploy-ready - 3. If passed: bash scripts/deploy-to-namecheap.sh - 4. Monitor: pm2 logs tradehax - -SECRET MANAGEMENT: - βœ… GitHub Secrets for CI/CD - βœ… .env file for local development (never committed) - βœ… Server env files on Namecheap - ❌ Never hardcode secrets - ❌ Never commit secrets - - -═════════════════════════════════════════════════════════════════════════════ - - -🎯 OUTPUT EXAMPLES - -═════════════════════════════════════════════════════════════════════════════ - -QUICK SYNC OUTPUT: -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ ╔════════════════════════════════════════════════════╗ β”‚ -β”‚ β•‘ TradeHax IDE Sync (Quick) β•‘ β”‚ -β”‚ β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• β”‚ -β”‚ β”‚ -β”‚ βœ… Fetching latest from origin/main β”‚ -β”‚ ℹ️ Git Status: 0 commits ahead, 0 commits behind β”‚ -β”‚ βœ… Installing git hooks β”‚ -β”‚ βœ… Running ESLint β”‚ -β”‚ βœ… Running TypeScript check β”‚ -β”‚ β”‚ -β”‚ Passed: 5/5 β”‚ -β”‚ Warnings: 0/5 β”‚ -β”‚ Failed: 0/5 β”‚ -β”‚ β”‚ -β”‚ βœ… Quick sync complete! Ready to edit. 
β”‚ -β”‚ npm run ide:sync:full β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - -DEPLOY-READY OUTPUT (IF SECRETS MISSING): -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ ❌ Deploy-ready check FAILED: β”‚ -β”‚ Missing: NAMECHEAP_VPS_SSH_KEY β”‚ -β”‚ β”‚ -β”‚ Set these in GitHub Secrets: β”‚ -β”‚ NAMECHEAP_VPS_HOST=199.188.201.164 β”‚ -β”‚ NAMECHEAP_VPS_USER=traddhou β”‚ -β”‚ NAMECHEAP_VPS_SSH_KEY=your_private_key β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸ”— INTEGRATION POINTS - -═════════════════════════════════════════════════════════════════════════════ - -GitHub Actions CI/CD: - β€’ Workflow: .github/workflows/namecheap-vps-deploy.yml - β€’ Runs: lint + type-check on every push - β€’ Deploys: On tag or manual trigger - β€’ Uses: NAMECHEAP_VPS_* secrets - -VS Code Tasks: - β€’ File: .vscode/tasks.json - β€’ Triggered: Command Palette (Ctrl+Shift+P) - β€’ Can run: Quick, full, or specific checks - β€’ Real-time output in panel - -npm Scripts: - β€’ ide:sync β†’ calls ide-sync-workflow.js with "quick" - β€’ ide:sync:full β†’ calls with "full" - β€’ ide:sync:deploy-ready β†’ calls with "deploy-ready" - - -═════════════════════════════════════════════════════════════════════════════ - - -✨ KEY FEATURES - -═════════════════════════════════════════════════════════════════════════════ - -βœ… Consistent Quality Gates - Every machine runs same lint + type-check - -βœ… Git Awareness - Know if you're ahead/behind origin/main - -βœ… Namecheap Integration - Validate deployment secrets before deploy - -βœ… Non-Breaking Warnings - Warnings don't stop workflow (unless strict mode) - -βœ… Clear Next Steps - Each workflow output tells you what to do next - -βœ… Colored Output - βœ… Green = passed, ⚠️ Yellow = warning, ❌ Red = failed - -βœ… Multiple Entry Points - Terminal (npm run) OR VS Code (tasks) - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸ“š DOCUMENTATION - -═════════════════════════════════════════════════════════════════════════════ - -Primary Guide: - IDE_PIPELINE_WORKFLOW.md (this directory) - β€’ Complete user guide - β€’ Commands & examples - β€’ Best practices - β€’ Troubleshooting - -Reference Files: - β€’ scripts/ide-sync-workflow.js (implementation) - β€’ .vscode/tasks.json (VS Code config) - β€’ NAMECHEAP_MIGRATION_CHECKLIST.md (deployment setup) - β€’ HF_INTEGRATION_GUIDE.md (HF setup) - - -═════════════════════════════════════════════════════════════════════════════ - - -🎁 WHAT YOU GET - -═════════════════════════════════════════════════════════════════════════════ - -βœ… Consistent Development Across Machines - Same checks, same output format, same next steps - -βœ… One Command (Multiple Options) - npm run ide:sync (quick) - npm run ide:sync:full (before push) - npm run ide:sync:deploy-ready (before deploy) - -βœ… VS Code Integration - 8 tasks available via Command Palette - -βœ… Namecheap-Ready - Validates all deployment requirements - -βœ… GitHub Actions Integration - Automated CI/CD pipeline included - -βœ… Best Practices Built-In - Multi-location 
workflow documented - - -═════════════════════════════════════════════════════════════════════════════ - - -πŸ“ž SUPPORT - -═════════════════════════════════════════════════════════════════════════════ - -Questions? - Email: darkmodder33@proton.me - GitHub: https://github.com/DarkModder33/main - -Documentation: - β€’ IDE_PIPELINE_WORKFLOW.md (complete guide) - β€’ scripts/ide-sync-workflow.js (implementation details) - β€’ .vscode/tasks.json (task definitions) - -Related Docs: - β€’ NAMECHEAP_MIGRATION_CHECKLIST.md - β€’ HF_INTEGRATION_GUIDE.md - β€’ BUILD_COMPLETE.md - - -═════════════════════════════════════════════════════════════════════════════ - - -βœ… STATUS: PRODUCTION READY - -═════════════════════════════════════════════════════════════════════════════ - -Latest Commit: b1bd828 (IDE Pipeline & Multi-Location Sync) - -Implementation: βœ… Complete - β€’ ide-sync-workflow.js (1,000+ LOC) - β€’ VS Code tasks configured - β€’ Best practices documented - -Testing: βœ… Ready - β€’ Quick sync: 30 seconds - β€’ Full sync: 2 minutes - β€’ Deploy-ready: 2 minutes - -Usage: βœ… Immediate - β€’ npm run ide:sync - β€’ npm run ide:sync:full - β€’ npm run ide:sync:deploy-ready - -Documentation: βœ… Complete - β€’ User guide (IDE_PIPELINE_WORKFLOW.md) - β€’ Examples & troubleshooting - β€’ Secret management guide - - -═════════════════════════════════════════════════════════════════════════════ - - ONE COMMAND TO KEEP EVERYTHING IN SYNC - - Use from any machine, any location - - npm run ide:sync (to get started) - -═════════════════════════════════════════════════════════════════════════════ diff --git a/IDE_PIPELINE_WORKFLOW.md b/IDE_PIPELINE_WORKFLOW.md deleted file mode 100644 index 4bb29175..00000000 --- a/IDE_PIPELINE_WORKFLOW.md +++ /dev/null @@ -1,419 +0,0 @@ -# πŸ”„ TradeHax IDE Pipeline & Multi-Location Sync Workflow - -## Overview - -The IDE Pipeline ensures **consistent development experience** across any machine/location with: -- βœ… Same quality gates everywhere (lint + type-check) -- βœ… Quick awareness of sync state vs origin/main -- βœ… Optional build and deploy-readiness checks -- βœ… One command in terminal OR one task in VS Code - ---- - -## Commands - -### Quick Sync (Default) - -```bash -npm run ide:sync -``` - -**Runs:** -- `git fetch origin main` β€” Get latest from remote -- Git ahead/behind report β€” How many commits ahead/behind -- `npm run hooks:install` β€” Install git hooks (best effort) -- `npm run lint` β€” ESLint check -- `npm run type-check` β€” TypeScript check - -**Time:** ~30 seconds -**Use:** When opening the project or starting a work session - ---- - -### Full Sync (Recommended Before Pushing) - -```bash -npm run ide:sync:full -``` - -**Runs:** -- Quick Sync (all of above) -- `npm ci --legacy-peer-deps` β€” Clean install dependencies -- `npm run build` β€” Production build -- Namecheap deploy config check (warning mode) - -**Time:** ~2 minutes -**Use:** Before pushing code to GitHub - ---- - -### Deploy-Ready Strict Sync - -```bash -npm run ide:sync:deploy-ready -``` - -**Runs:** -- Full Sync (all of above) -- **Strict** Namecheap deploy config check (FAILS if secrets missing) -- Final git status check - -**Time:** ~2 minutes -**Use:** Before Namecheap deployment (confirms all secrets configured) - ---- - -## VS Code Tasks - -Use **Command Palette** (`Ctrl+Shift+P` / `Cmd+Shift+P`): - -### Run Task -Search for and select: -- **TradeHax: IDE Sync (Quick)** β€” Quick sync -- **TradeHax: IDE Sync (Full)** β€” Full sync with build -- **TradeHax: IDE Sync 
(Deploy Ready)** β€” Strict pre-deployment check -- **TradeHax: Lint** β€” Run ESLint only -- **TradeHax: Type Check** β€” Run TypeScript only -- **TradeHax: Build** β€” Production build only -- **TradeHax: Dev Server** β€” Start dev server -- **TradeHax: Deploy to Namecheap** β€” Deploy script - -### Keyboard Shortcuts -- Quick sync: `Ctrl+Shift+B` (default build task) -- View all tasks: `Ctrl+Shift+P` β†’ "Run Task" - ---- - -## Multi-Location Best Practices - -### 1. **When Opening Workspace** -```bash -npm run ide:sync -``` -Brings you in sync with remote and validates local setup. - -### 2. **If Behind origin/main** -```bash -git pull origin main -``` -Always pull before making changes. - -### 3. **Before Pushing Code** -```bash -npm run ide:sync:full -``` -Ensures: -- No lint errors -- No type errors -- Build succeeds -- Dependencies up to date - -### 4. **Before Namecheap Deployment** -```bash -npm run ide:sync:deploy-ready -``` -Ensures: -- All of above + -- Namecheap secrets configured -- Deploy-ready status verified - -### 5. **Secret Management** -- βœ… Keep secrets **ONLY** in: - - GitHub Actions secrets (for CI/CD) - - Server env files (on Namecheap) - - Local `.env` file (never committed) -- ❌ **NEVER** commit secrets to Git -- ❌ **NEVER** hardcode credentials - ---- - -## Environment Variables & Secrets - -### Required Namecheap Secrets (GitHub Actions) - -Set in **GitHub Repository Settings β†’ Secrets and variables β†’ Actions**: - -``` -NAMECHEAP_VPS_HOST=199.188.201.164 -NAMECHEAP_VPS_USER=traddhou -NAMECHEAP_VPS_SSH_KEY=(your private SSH key) -``` - -### Optional Namecheap Secrets - -``` -NAMECHEAP_VPS_PORT=22 (default) -NAMECHEAP_APP_ROOT=/home/traddhou/public_html -NAMECHEAP_APP_PORT=3000 -``` - -### Local Development (.env) - -```bash -# Copy template -cp .env.example .env - -# Add secrets -HF_API_TOKEN=hf_your_token -NEXTAUTH_SECRET=your_secret_key -DATABASE_URL=your_database_url - -# Add Namecheap config (optional for local testing) -NAMECHEAP_VPS_HOST=199.188.201.164 -NAMECHEAP_VPS_USER=traddhou -``` - ---- - -## Workflow Examples - -### Example 1: Morning Startup (Any Location) - -```bash -# Open VS Code and command palette -Ctrl+Shift+P - -# Select: Run Task β†’ TradeHax: IDE Sync (Quick) -# Wait ~30 seconds - -# Now safe to start editing -npm run dev -``` - -### Example 2: Prepare Code for Push - -```bash -# After making changes -npm run ide:sync:full - -# If all checks pass: -git add . -git commit -m "feat: add new feature" -git push origin main - -# If checks fail: -# Fix errors and run again -npm run ide:sync:full -``` - -### Example 3: Deploy to Namecheap - -```bash -# Ensure all code is pushed -git push origin main - -# Run strict deployment check -npm run ide:sync:deploy-ready - -# If passed: -bash scripts/deploy-to-namecheap.sh - -# Monitor deployment -pm2 logs tradehax -``` - ---- - -## Output Examples - -### Quick Sync Success - -``` -╔══════════════════════════════════════════════════════════════════╗ -β•‘ TradeHax IDE Sync (Quick) β•‘ -β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -Step 1: Git Fetch & Status -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - - β†’ Fetching latest from origin/main... 
-βœ… Fetching latest from origin/main - -ℹ️ Git Status: 0 commits ahead, 0 commits behind - -βœ… Installing git hooks -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -Step 3: Linting -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - -βœ… Running ESLint -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -Step 4: Type Checking -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - -βœ… Running TypeScript check - -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -Quick Sync Summary -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - -Passed: 5/5 -Warnings: 0/5 -Failed: 0/5 - -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -Next Steps -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - -βœ… Quick sync complete! Ready to edit. - -Before pushing: - npm run ide:sync:full -``` - -### Full Sync with Build - -``` -Passed: 10/10 -Warnings: 1/10 (non-blocking) -Failed: 0/10 - -βœ… Full sync complete! Ready to push. - -Before Namecheap deployment: - npm run ide:sync:deploy-ready -``` - -### Deploy-Ready Strict Check - -``` -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -Step 8: Strict Namecheap Deploy Config Check -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - -Strict Namecheap Deploy Config Check: - -Required Variables: -βœ… VPS Host (NAMECHEAP_VPS_HOST): Configured -βœ… VPS Username (NAMECHEAP_VPS_USER): Configured -βœ… SSH Private Key (NAMECHEAP_VPS_SSH_KEY): Configured - -Optional Variables: -βœ… SSH Port (NAMECHEAP_VPS_PORT): Using default (22) -ℹ️ App Root (NAMECHEAP_APP_ROOT): Using default (/home/traddhou/public_html) -ℹ️ App Port (NAMECHEAP_APP_PORT): Using default (3000) - -βœ… Deploy-ready sync complete! Ready to deploy. - -Deploy to Namecheap: - bash scripts/deploy-to-namecheap.sh -``` - ---- - -## Troubleshooting - -### Lint Errors - -```bash -# See all lint errors -npm run lint - -# Auto-fix fixable errors -npm run lint:fix - -# Then run sync again -npm run ide:sync:full -``` - -### Type Errors - -```bash -# See all type errors -npm run type-check - -# Fix errors in your IDE -# Then run sync again -npm run ide:sync:full -``` - -### Build Fails - -```bash -# Check build log -npm run build - -# Common fixes: -npm run clean # Clear cache -npm ci # Reinstall deps -npm run build # Rebuild - -# Then run full sync -npm run ide:sync:full -``` - -### Missing Namecheap Secrets - -For **local development**: -```bash -# Add to .env (never commit) -NAMECHEAP_VPS_HOST=199.188.201.164 -NAMECHEAP_VPS_USER=traddhou -``` - -For **CI/CD (GitHub Actions)**: -1. Go to **Settings β†’ Secrets and variables β†’ Actions** -2. Add: `NAMECHEAP_VPS_HOST`, `NAMECHEAP_VPS_USER`, `NAMECHEAP_VPS_SSH_KEY` -3. 
Run sync again - ---- - -## GitHub Actions CI/CD Integration - -The IDE sync workflow integrates with GitHub Actions for automated deployment: - -**Workflow file:** `.github/workflows/namecheap-vps-deploy.yml` - -**Automatically:** -- Runs lint + type-check on every push -- Runs full build on PRs -- Can deploy to Namecheap on tag or manual trigger - ---- - -## Quick Reference - -| Command | Time | Use Case | -|---------|------|----------| -| `npm run ide:sync` | 30s | Opening project, daily sync | -| `npm run ide:sync:full` | 2m | Before pushing code | -| `npm run ide:sync:deploy-ready` | 2m | Before Namecheap deploy | -| `npm run lint` | 10s | Check linting only | -| `npm run type-check` | 15s | Check types only | -| `npm run build` | 90s | Build only | -| `npm run dev` | - | Start dev server | - ---- - -## File Locations - -**Configuration:** -- `.vscode/tasks.json` β€” VS Code task definitions -- `scripts/ide-sync-workflow.js` β€” Main sync script -- `.github/workflows/` β€” CI/CD automation - -**Documentation:** -- This file β€” IDE Pipeline guide -- `NAMECHEAP_MIGRATION_CHECKLIST.md` β€” Deployment checklist -- `HF_INTEGRATION_GUIDE.md` β€” HF setup guide - ---- - -## Support - -**Questions?** - -Email: darkmodder33@proton.me -GitHub: https://github.com/DarkModder33/main - -**Check Also:** -- `NAMECHEAP_MIGRATION_CHECKLIST.md` β€” Deployment setup -- `.github/workflows/namecheap-vps-deploy.yml` β€” CI/CD config -- `scripts/ide-sync-workflow.js` β€” Implementation details - ---- - -**Status:** βœ… Production Ready - -One command to keep your development environment in sync across any machine/location! diff --git a/INTEGRATION_GUIDE.md b/INTEGRATION_GUIDE.md deleted file mode 100644 index 5790a1c1..00000000 --- a/INTEGRATION_GUIDE.md +++ /dev/null @@ -1,381 +0,0 @@ -# TradeHax Integration Guide - Backend + Frontend Deployment - -This guide covers deploying the **Astral Awakening: TradeHax** game (backend API + React frontend) and integrating it with the main site. 
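-At a glance: the React frontend reads `VITE_BACKEND_URL` (configured in Step 2.2) and calls the Express backend over HTTPS, starting with the `/api/health` endpoint from Step 1.2. A minimal sketch of that wiring, assuming a Vite build (the `checkBackendHealth` helper is illustrative, not part of the repo):
-
-```typescript
-// Illustrative only: ping the deployed backend from the frontend.
-// Assumes VITE_BACKEND_URL is set as described in Step 2.2.
-const BACKEND_URL = import.meta.env.VITE_BACKEND_URL as string;
-
-export async function checkBackendHealth(): Promise<boolean> {
-  try {
-    const res = await fetch(`${BACKEND_URL}/api/health`);
-    if (!res.ok) return false;
-    const body = (await res.json()) as { status?: string };
-    return body.status === "ok"; // matches {"status":"ok",...} returned by the backend
-  } catch {
-    return false;
-  }
-}
-```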
- -## 🎯 Architecture Overview - -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Main Site (GitHub Pages) β”‚ -β”‚ tradehax.net (index.html, services.html) β”‚ -β”‚ + Game Links & Navigation β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β”‚ Links to game - β–Ό -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Frontend (GitHub Pages or Vercel) β”‚ -β”‚ tradehax-frontend/dist β†’ game.tradehax.net or /game β”‚ -β”‚ React + Three.js Game with Web3 β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β”‚ API calls - β–Ό -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Backend (Vercel) β”‚ -β”‚ tradehax-backend β†’ api.tradehax.net or vercel deploy β”‚ -β”‚ Express.js + Solana + NFT Minting β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -## Prerequisites - -- **Node.js 18+** and npm -- **Git** and GitHub account -- **Vercel account** (for backend deployment) -- **Solana CLI** (for token setup) -- **SHAMROCK token** created on Solana devnet - -## Step 1: Backend Setup (Vercel) - -### 1.1 Create SHAMROCK Token (One-time) - -Follow the detailed instructions in [SHAMROCK_SETUP.md](./SHAMROCK_SETUP.md) sections 1-1.11: - -```bash -# Quick summary: -solana config set --url https://api.devnet.solana.com -solana-keygen new --outfile ~/my-wallets/authority-keypair.json -solana airdrop 2 ~/my-wallets/authority-keypair.json --url devnet -solana-keygen new --outfile mint-keypair.json -MINT_PUBKEY=$(solana-keygen pubkey mint-keypair.json) -# ... [continue with full setup from SHAMROCK_SETUP.md] -``` - -**You'll need these values:** -- `SHAMROCK_MINT` - Token mint public key -- `AUTHORITY_SECRET` - Authority keypair array (from `cat ~/my-wallets/authority-keypair.json`) -- `TOKEN_ACCOUNT` - Token account public key - -### 1.2 Test Backend Locally - -```bash -cd tradehax-backend -npm install -npm run dev -``` - -Visit `http://localhost:3001/api/health` - should return: -```json -{"status":"ok","message":"TradeHax backend is running"} -``` - -### 1.3 Deploy to Vercel - -**Option A: Using Vercel CLI (Recommended)** - -```bash -npm install -g vercel -cd tradehax-backend -vercel login -vercel --prod -``` - -**Option B: Using GitHub Integration** - -1. Push `tradehax-backend` to your GitHub repo -2. Go to https://vercel.com -3. Click "New Project" β†’ "Import from Git" -4. Select your repository -5. Set root directory to `tradehax-backend` -6. Deploy - -### 1.4 Add Environment Variables in Vercel - -In your Vercel dashboard: - -1. Go to **Settings** β†’ **Environment Variables** -2. 
Add each variable: - -``` -SHAMROCK_MINT= -AUTHORITY_SECRET= -SOLANA_RPC=https://api.devnet.solana.com -MONGODB_URI= -TWITTER_APP_KEY= -TWITTER_APP_SECRET= -TWITTER_ACCESS_TOKEN= -TWITTER_ACCESS_SECRET= -``` - -### 1.5 Verify Deployment - -```bash -curl https://your-vercel-app.vercel.app/api/health -``` - -Should return `{"status":"ok","message":"TradeHax backend is running"}` - -**Save your backend URL** for the next step. - ---- - -## Step 2: Frontend Setup - -### 2.1 Install Dependencies - -```bash -cd tradehax-frontend -npm install -``` - -### 2.2 Configure Environment - -Edit or create `tradehax-frontend/.env`: - -``` -VITE_BACKEND_URL=https://your-backend-url.vercel.app -VITE_SOLANA_NETWORK=devnet -``` - -Replace `your-backend-url.vercel.app` with your actual Vercel backend URL. - -### 2.3 Test Locally - -```bash -npm run dev -``` - -Visit `http://localhost:5173` and test: -- Connect Phantom wallet -- Collect clovers (WASD movement) -- Tweet quest reward -- NFT minting - -### 2.4 Build for Production - -```bash -npm run build -``` - -Creates optimized `dist/` folder ready to deploy. - -### 2.5 Deploy to GitHub Pages - -The workflow at `.github/workflows/deploy-frontend.yml` automatically deploys when you push to `main` with changes to `tradehax-frontend/`. - -**To manually deploy:** - -```bash -npm run build -# GitHub Actions will handle deployment automatically -git add tradehax-frontend/ -git commit -m "Update game frontend" -git push origin main -``` - -Check deployment progress: -1. Go to your GitHub repo -2. Click **Actions** tab -3. Find "Deploy Frontend to GitHub Pages" workflow -4. Wait for βœ… completion - -**Your game will be live at:** `https://.github.io/tradehax-frontend/` (unless configured with custom domain) - ---- - -## Step 3: Configure Custom Domain (Optional) - -### 3.1 For Frontend (Game) - -1. In GitHub repo: **Settings** β†’ **Pages** -2. Under "Build and deployment": - - Source: **GitHub Actions** -3. Custom domain (if desired): - - Add subdomain record in DNS pointing to GitHub Pages - -### 3.2 For Backend - -If using Vercel, they provide a `.vercel.app` domain automatically. For custom domain: - -1. Vercel Dashboard β†’ Your Project β†’ **Settings** β†’ **Domains** -2. Add your domain and follow DNS setup - ---- - -## Step 4: Integrate with Main Site - -### 4.1 Add Game Link to Navigation - -Edit `index.html` - add game link to navigation section: - -```html -Play Astral Awakening -``` - -Or link to your custom game domain: - -```html -Play Astral Awakening -``` - -### 4.2 Add Game Section to Homepage - -Add a new section to `index.html` (optional but recommended): - -```html -
-<section id="game">
-  <div class="container">
-    <h2>Astral Awakening: The Game</h2>
-    <p>Collect clovers, earn SHAMROCK tokens, and mint NFT skins!</p>
-    <a href="/tradehax-frontend/" class="btn">Play Now</a>
-  </div>
-</section>
-``` - -### 4.3 Update Sitemap - -Edit `sitemap.xml` to include game URL: - -```xml - - https://tradehax.net/tradehax-frontend/ - weekly - -``` - ---- - -## Testing Checklist - -Before going live, verify: - -### Backend -- [ ] Health endpoint returns `{"status":"ok",...}` -- [ ] Backend logs show no errors -- [ ] Environment variables set in Vercel dashboard -- [ ] Token transfer test works (manual curl request) - -### Frontend -- [ ] Builds successfully (`npm run build`) -- [ ] Loads at deployment URL -- [ ] Wallet connection works with Phantom/Solflare -- [ ] Can collect clovers (WASD movement) -- [ ] Energy system works (5 clovers = 100 energy) -- [ ] Tweet quest submission works -- [ ] NFT minting panel displays skins -- [ ] Backend URL in `.env` is correct -- [ ] No console errors (F12 β†’ Console) - -### Integration -- [ ] Main site navigation links to game -- [ ] Game page loads correctly -- [ ] Smooth scroll/navigation between site sections -- [ ] Mobile responsive on both sites - ---- - -## Troubleshooting - -### Backend won't deploy -```bash -# Verify file structure -ls tradehax-backend/ -# Should have: api/, package.json, vercel.json - -# Check for syntax errors -node tradehax-backend/api/index.js -``` - -### Frontend won't connect to backend -```bash -# Check your .env file -cat tradehax-frontend/.env - -# Verify backend URL is correct -curl https://your-backend.vercel.app/api/health - -# Check browser console (F12) for error messages -``` - -### Wallet connection fails -- Ensure Phantom wallet is installed -- Switch Phantom to **Solana Devnet** -- Try refreshing the page -- Clear browser cache - -### Rewards not showing -- Verify tweet contains `#HyperboreaAscent` -- Check authority wallet has SOL for gas fees -- Review Vercel backend logs for errors -- Confirm `SHAMROCK_MINT` environment variable is correct - -### GitHub Actions workflow not running -1. Push changes to `tradehax-frontend/` directory -2. Check **Actions** tab in GitHub repo -3. If blocked: Go to **Actions** β†’ **Enable workflows** -4. Re-push changes to trigger - ---- - -## Production Migration to Mainnet - -When ready to go live on Solana mainnet: - -1. Create new SHAMROCK token on mainnet (repeat Step 1 with `--url mainnet-beta`) -2. Update backend environment variables: - - `SOLANA_RPC`: `https://api.mainnet-beta.solana.com` - - `SHAMROCK_MINT`: New mainnet mint address - - `AUTHORITY_SECRET`: New mainnet authority keypair -3. Fund authority wallet with real SOL -4. Update frontend `.env`: - - `VITE_SOLANA_NETWORK=mainnet-beta` -5. Redeploy both backend and frontend -6. Test with real wallets on mainnet - ---- - -## Useful Commands - -### Frontend -```bash -cd tradehax-frontend -npm install # Install dependencies -npm run dev # Start dev server (localhost:5173) -npm run build # Build for production -npm run lint # Check code quality -``` - -### Backend -```bash -cd tradehax-backend -npm install # Install dependencies -npm run dev # Start dev server (localhost:3001) -npm run start # Production start -``` - -### Git -```bash -git status # See changes -git add . 
# Stage all changes -git commit -m "message" # Commit -git push origin main # Push to GitHub -``` - ---- - -## Resources - -- **Frontend README**: [tradehax-frontend/README.md](./tradehax-frontend/README.md) -- **Backend README**: [tradehax-backend/README.md](./tradehax-backend/README.md) -- **SHAMROCK Token Setup**: [SHAMROCK_SETUP.md](./SHAMROCK_SETUP.md) -- **Quick Start**: [QUICK_START.md](./QUICK_START.md) -- **Vercel Docs**: https://vercel.com/docs -- **GitHub Pages Docs**: https://docs.github.com/en/pages -- **Solana Docs**: https://docs.solana.com - ---- - -## Support - -For issues: -1. Check browser console (F12) -2. Review backend logs in Vercel -3. Check GitHub Actions logs for deployment errors -4. Refer to README files for each app diff --git a/INTELLIGENCE_BUILD_LOG.md b/INTELLIGENCE_BUILD_LOG.md deleted file mode 100644 index 18bbb646..00000000 --- a/INTELLIGENCE_BUILD_LOG.md +++ /dev/null @@ -1,281 +0,0 @@ -# TradeHax Intelligence Build Log - -Last Updated: 2026-02-19 - -## Major Checkpoints - -- [x] Checkpoint 1: Project kickoff, scope, and architecture drafted -- [x] Checkpoint 2: Intelligence data model + mock feed layer scaffolded -- [x] Checkpoint 3: API routes for flow/dark-pool/politics/crypto/news live -- [x] Checkpoint 4: Intelligence UI routes and responsive dashboards live -- [x] Checkpoint 5: AI copilot + YouTube/Discord content bridge live -- [x] Checkpoint 6: QA pass, CI pass, deployment verification -- [x] Checkpoint 7: Provider adapter layer (mock/vendor modes) integrated -- [x] Checkpoint 8: Watchlist + persistent alert history APIs integrated -- [x] Checkpoint 9: Discord emitters + tier channel routing integrated -- [x] Checkpoint 10: Phase 2 QA, CI pass, commit/push, deployment verification -- [x] Checkpoint 11: Direct vendor adapters wired (Unusual Whales, Polygon, Bloomberg proxy) -- [x] Checkpoint 12: Durable watchlist/alerts storage adapter added (Supabase + memory fallback) -- [x] Checkpoint 13: Discord thread routing by strategy/risk integrated -- [x] Checkpoint 14: Phase 3 QA, CI pass, commit/push, deployment verification -- [x] Checkpoint 15: Live WebSocket ingestion scaffold integrated (overlay + stream APIs) -- [x] Checkpoint 16: Alert SLA metrics engine + Ops dashboard integrated -- [x] Checkpoint 17: Phase 4 QA, CI pass, commit/push, deployment verification -- [x] Checkpoint 18: Consent-aware AI ingestion + admin dataset export + Discord/alerts telemetry wiring - -## Progress Notes - -### 2026-02-19 - Kickoff -- Established v1 delivery scope inspired by institutional flow intelligence platforms. -- Defined route architecture for: - - `/intelligence` - - `/intelligence/flow` - - `/intelligence/dark-pool` - - `/intelligence/politics` - - `/intelligence/crypto-flow` - - `/intelligence/news` - - `/intelligence/calculator` - - `/intelligence/content` -- Confirmed this phase prioritizes usability and modular expansion over data-vendor lock-in. 
- -### 2026-02-19 - Implementation Objective -- Deliver production-ready scaffolding that connects: - - Equities/Options intelligence - - Crypto intelligence - - AI copilot workflows - - YouTube/Discord content automation hooks - -### 2026-02-19 - Checkpoint 3 Complete -- Added secured intelligence API routes: - - `/api/intelligence/overview` - - `/api/intelligence/flow` - - `/api/intelligence/dark-pool` - - `/api/intelligence/politics` - - `/api/intelligence/crypto-flow` - - `/api/intelligence/news` -- Added AI/media endpoints: - - `/api/intelligence/copilot` - - `/api/intelligence/content/daily-brief` - -### 2026-02-19 - Checkpoint 4 Complete -- Added full UI route set: - - `/intelligence` - - `/intelligence/flow` - - `/intelligence/dark-pool` - - `/intelligence/politics` - - `/intelligence/crypto-flow` - - `/intelligence/news` - - `/intelligence/calculator` - - `/intelligence/content` -- Added reusable intelligence UI components and responsive filter/table experiences. -- Added navigation links to Intelligence hub in header/footer/global nav. - -### 2026-02-19 - Checkpoint 5 Complete -- Added embedded AI copilot panel on core intelligence pages. -- Added daily YouTube + Discord brief generator with fallback behavior when HF is unavailable. -- Connected content generation directly to intelligence summary context. - -### 2026-02-19 - Build + Push Status -- CI pipeline passed (`npm run pipeline:ci`) with zero lint/type errors. -- Commit pushed to `origin/main`: `358c303`. -- Deployment verified live: - - `https://www.tradehax.net/intelligence` -> `200` - - `https://www.tradehax.net/api/intelligence/flow` -> `200` -- Status: checkpoint complete. - -### 2026-02-19 - Phase 2 Checkpoint 7 Complete -- Added provider abstraction layer: - - `lib/intelligence/provider.ts` - - Supports env-driven mode selection: `mock` or `vendor`. - - Adds provider metadata (`source`, `vendor`, `configured`, `simulated`). -- Updated intelligence feed APIs to resolve data through provider snapshot. -- Added provider status endpoint: - - `/api/intelligence/provider` - -### 2026-02-19 - Phase 2 Checkpoint 8 Complete -- Added watchlist + alert persistence store: - - `lib/intelligence/watchlist-store.ts` -- Added new APIs: - - `/api/intelligence/watchlist` (GET/POST/DELETE) - - `/api/intelligence/alerts` (GET/POST with evaluate + dispatch flow) -- Implemented alert generation for: - - options flow - - dark pool prints - - crypto flow triggers - - catalyst news for watched symbols - -### 2026-02-19 - Phase 2 Checkpoint 9 Complete -- Added Discord webhook routing utilities: - - `lib/intelligence/discord.ts` -- Added tier-based channel routing strategy: - - `free/basic/pro/elite` route resolution - - Per-tier webhook overrides with global fallback -- Added delivery tracking on alert objects (`deliveredToDiscordAt`). - -### 2026-02-19 - Phase 2 UI Surface -- Added new route: - - `/intelligence/watchlist` -- Added new UI component: - - `components/intelligence/WatchlistPanel.tsx` -- Updated hub route cards and quick links for watchlist workflow. - -### 2026-02-19 - Phase 2 Build + Push Status -- CI pipeline passed (`npm run pipeline:ci`) after Phase 2 integration. -- Commit pushed to `origin/main`: `717a3b7`. -- Deployment verification: - - `https://www.tradehax.net/intelligence/watchlist` -> `200` - - `https://www.tradehax.net/api/intelligence/alerts` -> `200` - - `https://www.tradehax.net/api/intelligence/provider` -> `200` -- `https://www.tradehaxai.tech/intelligence/watchlist` -> `200` -- Status: Phase 2 complete. 
- -### 2026-02-19 - Phase 3 Checkpoint 11 Complete -- Reworked provider engine to async, cache-aware vendor routing: - - `lib/intelligence/provider.ts` -- Added direct vendor HTTP adapter layer: - - `lib/intelligence/vendor-adapters.ts` -- Vendor coverage: - - Unusual Whales adapter (endpoint-driven) - - Polygon adapter (snapshot/news) - - Bloomberg proxy adapter (endpoint-driven) -- Added provider metadata enhancements: - - `mode`, `detail`, `lastError`, cache TTL reporting - -### 2026-02-19 - Phase 3 Checkpoint 12 Complete -- Added persistent storage layer: - - `lib/intelligence/persistence.ts` -- Added dual-mode storage support: - - Supabase/Postgres REST mode - - in-memory fallback mode -- Migrated watchlist/alert engine to async persistence-backed flow: - - `lib/intelligence/watchlist-store.ts` -- Added storage status API: - - `/api/intelligence/storage` -- Added Supabase schema: - - `db/supabase/intelligence_phase3.sql` - -### 2026-02-19 - Phase 3 Checkpoint 13 Complete -- Reworked Discord dispatch for strategy/risk thread routing: - - `lib/intelligence/discord.ts` -- Routing now groups alerts by: - - strategy (`options_flow`, `dark_pool`, `crypto_flow`, `catalyst_news`) - - risk (`urgent`, `watch`, `info`) -- Added thread ID resolution hierarchy: - - strategy+risk exact -> strategy -> risk -> default thread ID -- Added support for provided IDs in env templates: - - Discord ID: `1421509686443905094` - - Vercel Project ID: `prj_LDmkGrAq06c1DJcH98BeN6GYhZpW` - -### 2026-02-19 - Phase 3 Build + Push Status -- CI pipeline passed (`npm run pipeline:ci`) after Phase 3 integration. -- Commit pushed to `origin/main`: `d1825d3`. -- Deployment verification: - - `https://www.tradehax.net/api/intelligence/storage` -> `200` - - `https://www.tradehax.net/api/intelligence/provider` -> `200` - - `https://www.tradehax.net/intelligence/watchlist` -> `200` - - `https://www.tradehaxai.tech/api/intelligence/storage` -> `200` -- Runtime checks: - - Storage mode endpoint responding (`memory` fallback active by default until Supabase vars are set). - - Provider endpoint responding with mode metadata (`simulated` until vendor keys/endpoints are configured). -- Status: Phase 3 complete. - -### 2026-02-19 - Phase 4 Checkpoint 15 Complete -- Added live ingestion subsystem: - - `lib/intelligence/live-ingestion.ts` -- Added optional WebSocket ingestion controls: - - `TRADEHAX_INTELLIGENCE_WS_ENABLED` - - `TRADEHAX_INTELLIGENCE_WS_URL` - - `TRADEHAX_INTELLIGENCE_WS_PROTOCOL` - - `TRADEHAX_INTELLIGENCE_WS_RECONNECT_MS` -- Added live APIs: - - `/api/intelligence/live/status` - - `/api/intelligence/live/stream` (SSE) -- Integrated live overlay application into provider snapshots. - -### 2026-02-19 - Phase 4 Checkpoint 16 Complete -- Added SLA metrics engine: - - `lib/intelligence/metrics.ts` -- Instrumented: - - provider request quality/latency - - alert generation counts - - Discord dispatch drop/delivery performance - - live ingestion connection/message/error signals -- Added metrics API: - - `/api/intelligence/metrics` -- Added operations UI: - - `components/intelligence/IntelligenceOpsPanel.tsx` - - `app/intelligence/ops/page.tsx` -- Added hub navigation entry: - - `/intelligence/ops` -- Wired Discord application metadata into env templates: - - Application ID: `1450053974018494515` - - Public Key: `af33c2c6795e6ea3616748fc160bde9096844f2fc78cdde07035cf35633c4267` - -### 2026-02-19 - Phase 4 Build + Push Status -- CI pipeline passed (`npm run pipeline:ci`) after Phase 4 integration. 
-- Commit pushed to `origin/main`: `7988211`. -- Deployment verification: - - `https://www.tradehax.net/intelligence/ops` -> `200` - - `https://www.tradehax.net/api/intelligence/metrics` -> `200` - - `https://www.tradehax.net/api/intelligence/live/status` -> `200` - - `https://www.tradehax.net/api/intelligence/live/stream` -> `200` - - `https://www.tradehaxai.tech/api/intelligence/storage` -> `200` -- Runtime checks: - - Metrics endpoint returns SLA payload schema. - - Live status endpoint returns ingestion state (`enabled=false` by default until ws env vars are set). -- Status: Phase 4 complete. - -### 2026-02-19 - Checkpoint 18 Complete -- Added reusable admin access utility: - - `lib/admin-access.ts` - - Supports `TRADEHAX_ADMIN_KEY` and optional `TRADEHAX_SUPERUSER_CODE`. -- Upgraded ingestion engine: - - `lib/ai/data-ingestion.ts` - - Added consent-aware ingestion, pseudonymous user mapping, redaction, dataset export, and bounded memory retention. -- Added admin dataset endpoint: - - `/api/ai/admin/dataset` - - JSON summary mode + JSONL export mode (admin-gated). -- Wired ingestion telemetry into: - - `/api/ai/chat` - - `/api/ai/custom` - - `/api/ai/generate-image` - - `lib/intelligence/discord.ts` dispatch lifecycle - - `/api/intelligence/alerts` evaluation workflow -- Updated env templates with new ingestion/admin controls. - -## Active TODO - -- [x] Add API endpoints with secure origin/rate limits. -- [x] Build reusable intelligence page shell + cards/tables. -- [x] Add AI copilot endpoint and UI panel for context-aware analysis. -- [x] Add media brief generator endpoint for YouTube + Discord workflows. -- [x] Add top navigation + footer links for Intelligence hub. -- [x] Integrate provider adapter architecture for vendor transition. -- [x] Build user watchlists with alert persistence and evaluation flows. -- [x] Add role-based Discord routing + webhook emission support. -- [x] Add `/intelligence/watchlist` UI flow. -- [x] Wire direct vendor HTTP adapters (Unusual Whales/Polygon/Bloomberg proxy). -- [x] Add durable watchlist + alerts persistence adapter. -- [x] Add Discord thread routing by strategy/risk profile. -- [x] Add live ingestion subsystem and stream/status APIs. -- [x] Add SLA metrics engine and operations dashboard route. -- [x] Add consent-aware AI ingestion and pseudonymous profile mapping. -- [x] Add admin-gated dataset export endpoint. -- [x] Wire ingestion telemetry into AI + intelligence alert/Discord flows. -- [x] Update `.env.example` and `.env.vercel.production.template` for Phase 3 config. -- [x] Run `npm run pipeline:ci`. -- [x] Commit and push. - -## Post-Phase TODO - -- [x] Replace mock feeds with paid data vendor adapters. -- [x] Add user watchlists + persistent alerts. -- [x] Add Discord bot webhook emitters. -- [x] Add role-based channel routing for paid intelligence tiers. -- [x] Wire direct vendor HTTP adapters for specific providers (Unusual Whales, Polygon, Bloomberg). -- [x] Add persistent database storage for watchlists/alerts (Supabase/Postgres) for cross-deploy durability. -- [x] Add Discord thread routing by strategy type and risk profile. -- [x] Add live WebSocket ingestion for intraday flow updates. -- [x] Add alert SLA metrics panel (delivery latency, drop rate, provider error rate). -- [ ] Add signed Discord interactions endpoint verification for slash commands. -- [ ] Add websocket auth rotation + heartbeat alerts in Ops panel. 
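-Reference sketch for the open signed-interactions TODO (the `tweetnacl` library choice and the `DISCORD_PUBLIC_KEY` env name are assumptions; route wiring omitted). Discord signs each interaction over `timestamp + rawBody` with the application's Ed25519 public key, delivered via the `X-Signature-Ed25519` / `X-Signature-Timestamp` headers:
-
-```typescript
-// Sketch only: verify a Discord interaction request before handling slash commands.
-import nacl from "tweetnacl";
-
-export function isValidDiscordRequest(
-  signature: string, // X-Signature-Ed25519 header
-  timestamp: string, // X-Signature-Timestamp header
-  rawBody: string,   // unparsed request body
-): boolean {
-  const publicKey = process.env.DISCORD_PUBLIC_KEY ?? "";
-  if (!publicKey || !signature || !timestamp) return false;
-  return nacl.sign.detached.verify(
-    Buffer.from(timestamp + rawBody),
-    Buffer.from(signature, "hex"),
-    Buffer.from(publicKey, "hex"),
-  );
-}
-```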
diff --git a/KUBERNETES_DEPLOYMENT_STATUS.md b/KUBERNETES_DEPLOYMENT_STATUS.md deleted file mode 100644 index 0f27dda9..00000000 --- a/KUBERNETES_DEPLOYMENT_STATUS.md +++ /dev/null @@ -1,341 +0,0 @@ -# Kubernetes Cluster Status & Deployment Guide - -## βœ… Cluster Status - -### Kubernetes Cluster Info -- **Control Plane**: https://127.0.0.1:56927 -- **Version**: v1.31.1 -- **Status**: βœ… Ready -- **Node Count**: 1 (desktop-control-plane) -- **Node Status**: Ready - -### GitLab Agent -- **Namespace**: gitlab-agent-gitlab1 -- **Replicas**: 2/2 Running -- **Status**: βœ… Connected to GitLab -- **KAS Address**: wss://kas.gitlab.com -- **Agent ID**: agentk:3161108 -- **Leader**: gitlab-agent-v2-7cf65d9858-5n7gq - -### Running Services -- **kubernetes** (ClusterIP) - 10.96.0.1:443 -- **gitlab-agent-service** (ClusterIP) - 10.96.112.79:8080 -- **my-app** (1 pod running) - -## πŸš€ Deployment Architecture - -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ GitLab (glagent-emt2cmu7Cski...) β”‚ -β”‚ - Repository β”‚ -β”‚ - CI/CD Pipeline β”‚ -β”‚ - GitOps Configuration β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β”‚ WebSocket (WSS) - ↓ -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ KAS (kas.gitlab.com) β”‚ -β”‚ - Agent Communication β”‚ -β”‚ - Command Tunnel β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β”‚ - ↓ -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Local Kubernetes Cluster β”‚ -β”‚ v1.31.1 (desktop-control-plane) β”‚ -β”‚ β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ gitlab-agent (2 replicas) β”‚ β”‚ -β”‚ β”‚ - Leader Election Active β”‚ β”‚ -β”‚ β”‚ - Ready for deployments β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ TradeHax (Ready to Deploy) β”‚ β”‚ -β”‚ β”‚ - deployment.yaml β”‚ β”‚ -β”‚ β”‚ - service.yaml β”‚ β”‚ -β”‚ β”‚ - ingress.yaml β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -## πŸ“‹ Cluster Capabilities - -βœ… **Pod Management** -- Create, delete, scale pods -- Resource limits & requests -- Health checks & auto-restart - -βœ… **Service Discovery** -- Internal DNS (CoreDNS) -- ClusterIP services -- Port exposure - -βœ… **Configuration** -- ConfigMaps (environment variables) -- Secrets (encrypted data) -- Resource quotas - -βœ… **Scaling** -- Horizontal Pod Autoscaler (HPA) -- Manual replica scaling -- Rolling updates - -βœ… **Networking** -- Pod-to-pod communication -- Service-to-service routing -- Ingress (with NGINX controller) - -βœ… **Storage** (if needed) -- Persistent Volumes (PV) -- Persistent Volume Claims (PVC) -- Storage classes - -βœ… **GitOps Ready** -- Flux integration possible -- ArgoCD ready -- GitLab Agent bridge active - -## 🎯 Deploy TradeHax to Cluster - -### Option 1: Direct kubectl Apply - 
-```bash -# Apply deployment manifests -kubectl apply -f k8s/deployment.yaml -kubectl apply -f k8s/ingress.yaml - -# Verify deployment -kubectl get deployments -kubectl get pods -kubectl get services -``` - -### Option 2: GitLab CI/CD Pipeline - -Push commits to trigger automatic deployment: - -```yaml -# In .gitlab-ci.yml -deploy:k8s: - stage: deploy - script: - - kubectl set image deployment/tradehax-app \ - tradehax=ghcr.io/darkmodder33/main:latest - - kubectl rollout status deployment/tradehax-app - only: - - main -``` - -### Option 3: Helm Deployment - -```bash -# Create Helm values -helm repo add tradehax https://charts.tradehaxai.tech -helm repo update - -# Deploy TradeHax -helm install tradehax tradehax/tradehax \ - --namespace default \ - --values helm/values.yaml -``` - -## πŸ“¦ Deployment Manifests Ready - -### Current Files in `k8s/` -- `deployment.yaml` - TradeHax app pods -- `ingress.yaml` - DNS routing & TLS -- `hpa.yaml` (can be created) - Auto-scaling -- `pdb.yaml` (can be created) - Pod disruption budget - -### Example Deployment - -```yaml -# k8s/deployment.yaml structure -apiVersion: apps/v1 -kind: Deployment -metadata: - name: tradehax-app -spec: - replicas: 2 - template: - spec: - containers: - - name: tradehax - image: ghcr.io/darkmodder33/main:latest - ports: - - containerPort: 3000 - env: - - name: NODE_ENV - value: "production" - - name: HF_API_TOKEN - valueFrom: - secretKeyRef: - name: tradehax-secrets - key: hf-token -``` - -## πŸ” Secrets Management - -Create secrets for sensitive data: - -```bash -# Create secret for HF API token -kubectl create secret generic tradehax-secrets \ - --from-literal=hf-token=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 - -# Create secret for Docker registry (if private images) -kubectl create secret docker-registry ghcr-credentials \ - --docker-server=ghcr.io \ - --docker-username=DarkModder33 \ - --docker-password= -``` - -## πŸ“Š Monitoring & Debugging - -### Check Deployment Status -```bash -# View deployment -kubectl describe deployment tradehax-app - -# Check pod logs -kubectl logs -f deployment/tradehax-app - -# View events -kubectl get events --sort-by='.lastTimestamp' -``` - -### Scaling -```bash -# Manual scale -kubectl scale deployment tradehax-app --replicas=3 - -# Check HPA status -kubectl get hpa tradehax-hpa -kubectl describe hpa tradehax-hpa -``` - -### Network Testing -```bash -# Port forward to test locally -kubectl port-forward svc/tradehax-service 3000:80 - -# Test connectivity from pod -kubectl exec -it pod/tradehax-app-xxx -- curl localhost:3000 -``` - -## πŸ”„ GitOps Setup - -### Option A: Flux CD -```bash -# Install Flux -flux bootstrap github \ - --owner=DarkModder33 \ - --repo=main \ - --branch=main \ - --path=clusters/local - -# Flux will auto-sync k8s/ manifests -``` - -### Option B: ArgoCD -```bash -# Install ArgoCD -kubectl create namespace argocd -kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml - -# Create Application CRD -kubectl apply -f - < 70% - - Memory > 80% -``` - -### Performance Metrics -``` -Pod Startup Time: ~10-15 seconds -Request Latency: <200ms (cached) -Throughput: 100+ requests/sec per pod -Pod Disruption Budget: Min 1 always running -``` - -## πŸ” Security Features - -βœ… Non-root container user (1001) -βœ… Read-only root filesystem (when possible) -βœ… No privilege escalation -βœ… Resource limits enforced -βœ… Network policies (can be configured) -βœ… RBAC enabled -βœ… TLS encryption (Ingress) -βœ… Secret management for sensitive 
data - -## πŸ’Ύ Persistence & Storage - -Current setup: **Stateless** (recommended) -- No databases attached -- Horizontal scaling friendly -- Easy blue-green deployments - -Optional: Add persistent storage -```bash -# PostgreSQL StatefulSet -# Redis Cache -# MongoDB for analytics -# S3-compatible storage -``` - -## πŸ“Š Monitoring & Observability - -Built-in Kubernetes metrics: -```bash -# View real-time metrics -kubectl top nodes -kubectl top pods - -# View events -kubectl get events --sort-by='.lastTimestamp' - -# Check resource usage -kubectl describe node -``` - -Optional additions: -- Prometheus + Grafana -- ELK Stack (Elasticsearch, Logstash, Kibana) -- Jaeger (distributed tracing) -- DataDog / New Relic - -## πŸ”„ Continuous Deployment Workflow - -``` -1. Developer: git push origin main - ↓ -2. GitHub: Receives push - ↓ -3. GitLab Actions: Triggers CI/CD - ↓ -4. Build Stage: docker build, push to GHCR - ↓ -5. Test Stage: npm run build, lint - ↓ -6. Deploy Stage: kubectl set image via GitLab Agent - ↓ -7. Rollout: Rolling update (no downtime) - ↓ -8. Live: New version serving at tradehax.net - -Total time: ~5-10 minutes from push to live βœ… -``` - -## βœ… Pre-Deployment Checklist - -- [x] Kubernetes cluster running (v1.31.1) -- [x] GitLab Agent connected (2 replicas) -- [x] Deployment manifests created -- [x] Ingress configuration ready -- [x] CI/CD pipeline configured -- [x] Docker image building works -- [x] Container registry access (GHCR) -- [x] Resource limits configured -- [x] Health checks defined -- [x] Auto-scaling configured -- [ ] Domain DNS pointed to cluster -- [ ] TLS certificates configured -- [ ] Monitoring setup (optional) -- [ ] Backup strategy (optional) - -## 🎯 Next Steps - -### Immediate (To Go Live) - -```bash -# 1. Deploy NGINX Ingress -kubectl apply -f k8s/nginx-ingress.yaml - -# 2. Deploy TradeHax -kubectl apply -f k8s/deployment.yaml -kubectl apply -f k8s/ingress.yaml - -# 3. Verify running -kubectl get all -kubectl get ingress - -# 4. Test locally -kubectl port-forward svc/tradehax-service 3000:80 -# Visit: http://localhost:3000 -``` - -### Soon After (For Production) - -1. Configure domain DNS (Namecheap) -2. Setup TLS certificates (Let's Encrypt) -3. Enable GitOps auto-deploy -4. Setup monitoring -5. Configure backups - -### Optional Enhancements - -1. Add database (PostgreSQL) -2. Add cache layer (Redis) -3. Add CDN (Cloudflare) -4. Add DDoS protection -5. 
Add WAF (Web Application Firewall) - -## πŸ“ž Support Commands - -```bash -# Check cluster health -kubectl cluster-info -kubectl get nodes - -# View all resources -kubectl get all -A - -# Check pod status -kubectl get pods -o wide - -# View logs -kubectl logs -f deployment/tradehax-app - -# Execute into pod -kubectl exec -it -- /bin/sh - -# Port forward -kubectl port-forward svc/tradehax-service 3000:80 - -# Scale deployment -kubectl scale deployment tradehax-app --replicas=3 - -# Update image -kubectl set image deployment/tradehax-app \ - tradehax=ghcr.io/darkmodder33/main:latest - -# Rollback -kubectl rollout undo deployment/tradehax-app -``` - ---- - -## πŸŽ‰ CONCLUSION - -Your TradeHax AI platform is **fully prepared for Kubernetes deployment**: - -βœ… **Source Code**: Production-ready with AI/LLM/Trading/Image features -βœ… **Docker**: Containerized and tested -βœ… **Kubernetes**: Manifests created with auto-scaling & health checks -βœ… **GitOps**: CI/CD pipeline ready for auto-deployment -βœ… **GitLab Agent**: Connected and operational -βœ… **Documentation**: Complete deployment guides available - -**Status: READY FOR PRODUCTION DEPLOYMENT** πŸš€ - -When you're ready, run: `kubectl apply -f k8s/` - -Your app will be live at tradehaxai.tech within minutes! diff --git a/LOCAL_REPO_WORKFLOW.md b/LOCAL_REPO_WORKFLOW.md deleted file mode 100644 index 7d3f3e71..00000000 --- a/LOCAL_REPO_WORKFLOW.md +++ /dev/null @@ -1,38 +0,0 @@ -# Local Repository Workflow (Canonical + Mirror) - -This machine keeps two clones of the same project: - -- Canonical (active coding): `C:\tradez\main` -- Mirror (backup/sync only): `C:\DarkModder33\main` - -Use this model to prevent IDE and pipeline drift. - -## Canonical Rules - -- Open `C:\tradez\main` for coding, running, and deployments. -- Run pipeline tasks from the canonical repo. -- Push from canonical only. - -## Mirror Rules - -- Do not make feature edits in mirror. -- Keep mirror synchronized from canonical. -- Use mirror as rollback/reference copy. - -## Commands - -- Repo status across both paths: - - `npm run repo:status` -- Sync mirror from canonical: - - `npm run repo:sync-mirror` - -Both commands are also available in VS Code Tasks: -- `TradeHax: Repo Status` -- `TradeHax: Sync Mirror` - -## Suggested Daily Loop - -1. Open canonical workspace. -2. `TradeHax: Local Pipeline` before commit. -3. Commit and push from canonical. -4. Run `TradeHax: Sync Mirror` to keep backup clone aligned. diff --git a/MONETIZATION_GUIDE.md b/MONETIZATION_GUIDE.md deleted file mode 100644 index e3b4120b..00000000 --- a/MONETIZATION_GUIDE.md +++ /dev/null @@ -1,413 +0,0 @@ -# πŸ’° Complete Monetization Setup Guide - -## Overview -This guide covers all revenue streams for tradehaxai.tech and how to set them up. - ---- - -## Revenue Stream 1: Google AdSense (Display Advertising) - -### Expected Revenue -- **10,000 monthly visitors:** $50-150/month -- **50,000 monthly visitors:** $250-750/month -- **100,000 monthly visitors:** $500-1,500/month - -*Revenue varies by niche, traffic quality, and ad placement* - -### Setup Steps - -#### Step 1: Apply for Google AdSense -1. Go to https://adsense.google.com -2. Click **Get Started** -3. Sign in with Google account -4. Enter website URL: `tradehaxai.tech` -5. Select your country/region -6. Review and accept Terms of Service -7. Click **Start Using AdSense** - -#### Step 2: Add Your Site -1. In AdSense dashboard, go to **Sites** -2. Click **Add Site** -3. Enter: `tradehaxai.tech` -4. 
Click **Save and Continue** - -#### Step 3: Get AdSense Code -1. AdSense will provide a verification code snippet -2. Copy the Publisher ID (looks like: `ca-pub-XXXXXXXXXXXXXXXX`) -3. The code is already integrated in this project via the AdSense component - -#### Step 4: Add Publisher ID to Vercel -1. Go to Vercel Dashboard β†’ Your Project β†’ Settings β†’ Environment Variables -2. Add new variable: - - **Key:** `NEXT_PUBLIC_ADSENSE_ID` - - **Value:** `ca-pub-XXXXXXXXXXXXXXXX` (your actual Publisher ID) - - **Environments:** Production -3. Click **Save** -4. Redeploy your site - -#### Step 5: Submit for Review -1. After adding the code and redeploying, go back to AdSense dashboard -2. Click **Request Review** -3. Google will review your site (typically 1-7 days) -4. You'll receive email when approved - -#### Step 6: Create Ad Units (After Approval) -1. In AdSense dashboard, go to **Ads** β†’ **By Site** -2. Click **New Ad Unit** -3. Select **Display ads** -4. Name it: "Sidebar Banner" (or similar) -5. Copy the Ad Slot ID -6. Add AdSense component to your pages with this slot ID - -### Best Ad Placements -- βœ… Above the fold (top of page) -- βœ… Within content (between sections) -- βœ… Sidebar (desktop) -- βœ… End of article/page -- ❌ Avoid too many ads (affects user experience) - ---- - -## Revenue Stream 2: Solana Transaction Fees - -### Expected Revenue -- Depends entirely on user activity -- Each counter increment/decrement transfers 0.001 SOL -- If 100 users interact 10 times/day = 1 SOL/day (~$150-250/day at current rates) - -### Setup (Already Configured!) -Your Solana program already collects fees: -- Users pay 0.001 SOL per increment -- Funds go to user-specific vault PDAs -- Program controls these vaults - -### How to Track Revenue -1. Check program vault balances using Solana Explorer -2. Monitor transaction volume in Vercel Analytics -3. Track user engagement via Google Analytics - -### How to Withdraw Funds -The program needs an admin withdraw function (consider adding in future PR): -```rust -pub fn withdraw_fees(ctx: Context, amount: u64) -> Result<()> { - // Add admin-only withdrawal logic -} -``` - ---- - -## Revenue Stream 3: Affiliate Marketing - -### Expected Revenue -- **Crypto Exchanges:** 20-50% commission on trading fees -- **High potential:** If 100 users sign up and trade = $500-2000/month -- **Passive income:** Earn from their lifetime trading activity - -### Top Affiliate Programs - -#### 1. Binance Affiliate Program -- **Commission:** Up to 50% of trading fees -- **Cookie Duration:** Lifetime -- **Sign up:** https://www.binance.com/en/activity/affiliate -- **Payout:** Monthly, minimum $10 - -#### 2. Coinbase Affiliate Program -- **Commission:** $10 per sign-up (after $100 in trading) -- **Cookie Duration:** 30 days -- **Sign up:** https://www.coinbase.com/affiliates -- **Payout:** Monthly via PayPal - -#### 3. Phantom Wallet -- **Commission:** Variable -- **Great fit:** Your users already need Solana wallets! -- **Sign up:** Contact Phantom team - -#### 4. 
TradingView -- **Commission:** 50% of subscription for 12 months -- **Perfect for:** Trading platform users -- **Sign up:** https://www.tradingview.com/partner-program/ - -### Implementation -Add affiliate links to: -- Footer -- Dashboard sidebar -- "Get Started" tutorial -- Blog posts (create content around trading) - -Example component (**components/AffiliateLinks.tsx**): -```typescript -export function AffiliateLinks() { - return ( - - ); -} -``` - ---- - -## Revenue Stream 4: Email Marketing - -### Expected Revenue -- Build email list β†’ promote affiliate offers -- Average: $1-5 per subscriber per month -- 1,000 subscribers = $1,000-5,000/month potential - -### Email Service Providers - -#### Option 1: Mailchimp (Beginner-Friendly) -- **Free tier:** Up to 500 subscribers -- **Cost:** $13/month for 500-1,000 subscribers -- **Setup:** - 1. Sign up at https://mailchimp.com - 2. Create audience list - 3. Get API key (Account β†’ Extras β†’ API Keys) - 4. Get List ID (Audience β†’ Settings β†’ Unique ID) - 5. Add to Vercel environment variables: - - `MAILCHIMP_API_KEY` - - `MAILCHIMP_LIST_ID` - -#### Option 2: ConvertKit (Creator-Focused) -- **Free tier:** Up to 1,000 subscribers -- **Cost:** $29/month for advanced features -- **Setup:** Similar to Mailchimp - -### Update API Route -Edit `app/api/subscribe/route.ts` to integrate with Mailchimp: - -```typescript -import { NextResponse } from 'next/server'; - -export async function POST(request: Request) { - try { - const { email } = await request.json(); - - const MAILCHIMP_API_KEY = process.env.MAILCHIMP_API_KEY; - const MAILCHIMP_LIST_ID = process.env.MAILCHIMP_LIST_ID; - const MAILCHIMP_DC = MAILCHIMP_API_KEY?.split('-')[1]; // e.g., 'us1' - - const response = await fetch( - `https://${MAILCHIMP_DC}.api.mailchimp.com/3.0/lists/${MAILCHIMP_LIST_ID}/members`, - { - method: 'POST', - headers: { - Authorization: `Bearer ${MAILCHIMP_API_KEY}`, - 'Content-Type': 'application/json', - }, - body: JSON.stringify({ - email_address: email, - status: 'subscribed', - }), - } - ); - - if (response.ok) { - return NextResponse.json({ success: true }); - } else { - const error = await response.json(); - return NextResponse.json({ error: error.detail }, { status: 400 }); - } - } catch (error) { - return NextResponse.json({ error: 'Failed to subscribe' }, { status: 500 }); - } -} -``` - ---- - -## Revenue Stream 5: Premium Features (Future) - -### Subscription Model with Stripe - -#### Potential Premium Features -- Advanced trading signals ($19/month) -- Portfolio analytics ($29/month) -- Automated trading bots ($49/month) -- Priority support ($9/month) - -#### Setup Stripe -1. Sign up at https://stripe.com -2. Get API keys (Dashboard β†’ Developers β†’ API Keys) -3. Add to Vercel environment variables: - - `STRIPE_SECRET_KEY` - - `NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY` -4. Install Stripe SDK: `npm install stripe @stripe/stripe-js` - -#### Create Subscription Products -1. In Stripe Dashboard β†’ Products β†’ Add Product -2. Create subscription plans with monthly/annual pricing -3. Get Product IDs -4. 
Implement checkout flow - ---- - -## Analytics & Tracking - -### Google Analytics (Already Integrated) -Monitor these key metrics: -- **Visitors:** Daily/monthly traffic -- **Bounce Rate:** Keep below 60% -- **Session Duration:** Aim for 2+ minutes -- **Conversions:** Track sign-ups, clicks - -### Vercel Analytics (Already Integrated) -Track: -- **Real-time visitors** -- **Page load performance** -- **Top pages** -- **Geographic distribution** - -### Create Goals in Google Analytics -1. Go to Google Analytics β†’ Admin β†’ Goals -2. Create goals for: - - Email subscriptions - - Affiliate link clicks - - Wallet connections - - Trading interactions - ---- - -## Revenue Projections - -### Scenario 1: 10,000 Monthly Visitors -- AdSense: $100/month -- Affiliates (2% conversion): $200/month -- Email (500 subscribers): $500/month -- **Total: ~$800/month** - -### Scenario 2: 50,000 Monthly Visitors -- AdSense: $500/month -- Affiliates (2% conversion): $1,000/month -- Email (2,500 subscribers): $2,500/month -- **Total: ~$4,000/month** - -### Scenario 3: 100,000 Monthly Visitors + Premium -- AdSense: $1,000/month -- Affiliates (2% conversion): $2,000/month -- Email (5,000 subscribers): $5,000/month -- Premium (100 users @ $29): $2,900/month -- **Total: ~$10,900/month** - ---- - -## Action Plan Timeline - -### Week 1: Setup Analytics -- [x] Deploy site to tradehaxai.tech -- [ ] Set up Google Analytics -- [ ] Enable Vercel Analytics -- [ ] Track baseline traffic - -### Week 2: Apply for AdSense -- [ ] Apply for Google AdSense -- [ ] Wait for approval (1-7 days) -- [ ] Add ad placements once approved - -### Week 3: Affiliate Marketing -- [ ] Join Binance affiliate program -- [ ] Join Coinbase affiliate program -- [ ] Add affiliate links to site -- [ ] Create content to drive affiliate conversions - -### Week 4: Email Marketing -- [ ] Set up Mailchimp account -- [ ] Integrate API with subscribe endpoint -- [ ] Add email capture to all pages -- [ ] Create welcome email sequence - -### Month 2-3: Content & SEO -- [ ] Write blog posts about Solana trading -- [ ] Create tutorials for your platform -- [ ] Build backlinks -- [ ] Optimize for "Solana trading platform" keywords - -### Month 4+: Premium Features -- [ ] Set up Stripe -- [ ] Develop premium features -- [ ] Launch subscription plans -- [ ] Market to existing users - ---- - -## Optimization Tips - -### Increase Ad Revenue -1. Place ads above the fold -2. Use responsive ad units -3. Test different ad formats -4. Monitor RPM (revenue per thousand impressions) -5. Block low-paying ad categories - -### Increase Affiliate Revenue -1. Write comparison articles (e.g., "Best Crypto Exchanges 2026") -2. Create video tutorials mentioning affiliate tools -3. Add affiliate links in email newsletters -4. Use compelling CTAs ("Get $10 Bonus") - -### Grow Email List -1. Offer lead magnet (e.g., "Free Trading Strategy PDF") -2. Add popup (exit-intent) -3. Promote in social media -4. Create gated content - -### Drive More Traffic -1. SEO optimization (target keywords) -2. Social media marketing (Twitter, Reddit, Discord) -3. Content marketing (blog posts) -4. Paid ads (Google Ads, Twitter Ads) -5. Partnerships with crypto influencers - ---- - -## Legal Requirements - -### Disclosures -Add to your site footer and affiliate pages: -``` -"TradeHax AI is a participant in affiliate programs and may earn -commissions from qualifying purchases made through links on this site." -``` - -### Privacy Policy -Required for AdSense and email collection. 
-Use generator: https://www.termsfeed.com/privacy-policy-generator/ - -### Terms of Service -Protect yourself legally. -Use generator: https://www.termsfeed.com/terms-service-generator/ - ---- - -## Support & Resources - -- **AdSense Help:** https://support.google.com/adsense -- **Affiliate Marketing Guide:** https://neilpatel.com/what-is-affiliate-marketing/ -- **Email Marketing Best Practices:** https://mailchimp.com/resources/ -- **Stripe Documentation:** https://stripe.com/docs - ---- - -**Ready to start earning? Begin with Week 1 setup and track your progress!** diff --git a/NAMECHEAP_CPANEL_DEPLOYMENT.md b/NAMECHEAP_CPANEL_DEPLOYMENT.md deleted file mode 100644 index da9d4853..00000000 --- a/NAMECHEAP_CPANEL_DEPLOYMENT.md +++ /dev/null @@ -1,520 +0,0 @@ -# πŸš€ TradeHax Namecheap cPanel Deployment Guide - -## Quick Reference - -**Server Details:** -- Host: `199.188.201.164` -- User: `traddhou` -- Domain: `https://tradehax.net` -- cPanel: `https://business188.namecheaphosting.com:2083` -- App Root: `/home/traddhou/public_html` -- Node Version: `20.x` -- Port: `3000` - ---- - -## 5-Minute Quick Start - -```bash -# 1. Generate deployment automation guide -node scripts/namecheap-cpanel-deployment.js - -# 2. Review the generated guide - -# 3. Run automated deployment -bash scripts/deploy-to-namecheap.sh - -# 4. Verify -https://tradehax.net -``` - ---- - -## Step-by-Step Deployment - -### Step 1: Prepare Application - -```bash -# Ensure latest build -npm run build - -# Verify standalone build exists -ls -la .next/standalone/server.js - -# Create deployment directory -mkdir -p deployment -cp -r .next public package.json .env.example ecosystem.config.js deployment/ -``` - -### Step 2: SSH & Upload (Choose One) - -#### Option A: SSH Git Clone (Easiest) - -```bash -ssh traddhou@199.188.201.164 - -cd /home/traddhou/public_html - -# Backup existing (if any) -mv main main.backup.$(date +%s) 2>/dev/null || true - -# Clone -git clone https://github.com/DarkModder33/main.git . - -# Install & build -npm install --production -npm run build - -# Set permissions -chmod 600 .env -``` - -#### Option B: File Manager Upload - -1. Go to: cPanel β†’ File Manager -2. Navigate to: `/home/traddhou/public_html` -3. Upload ZIP file -4. Right-click β†’ Extract -5. Verify files present - -#### Option C: SFTP (FileZilla) - -``` -Host: ftp.tradehax.net -User: traddhou -Port: 21 (FTP) or 22 (SFTP) -``` - -### Step 3: Configure Node.js App in cPanel - -1. **Go to:** cPanel β†’ Software β†’ Setup Node.js App -2. **Click:** Create Application -3. **Fill in:** - - Node.js version: `20.x` - - Application mode: `Production` - - Application root: `/home/traddhou/public_html` - - Application URL: `https://tradehax.net` - - Application startup file: `.next/standalone/server.js` - -4. **Add Environment Variables:** - - Click "Add Variable" for each: - - ``` - NODE_ENV = production - PORT = 3000 - NEXT_PUBLIC_SITE_URL = https://tradehax.net - - HF_API_TOKEN = hf_xxxxxxxxxxxxxxx - HF_MODEL_ID = mistralai/Mistral-7B-Instruct-v0.1 - HF_IMAGE_MODEL_ID = stabilityai/stable-diffusion-2-1 - - NEXT_PUBLIC_ENABLE_PAYMENTS = true - STRIPE_SECRET_KEY = sk_live_xxxxx - - NEXTAUTH_SECRET = [generate: openssl rand -base64 32] - NEXTAUTH_URL = https://tradehax.net - ``` - -5. **Click:** Save -6. **Click:** Run NPM Install -7. **Click:** Start Application - -### Step 4: Configure Apache & HTTPS - -**Via File Manager:** - -1. Navigate to: `/home/traddhou/public_html` -2. 
Create/Edit: `.htaccess` - -```apache - -RewriteEngine On - -# Proxy to Node.js app -RewriteBase / -RewriteRule ^$ http://127.0.0.1:3000/ [P,L] -RewriteCond %{REQUEST_FILENAME} !-f -RewriteCond %{REQUEST_FILENAME} !-d -RewriteRule ^(.*)$ http://127.0.0.1:3000/$1 [P,L] - -# Force HTTPS -RewriteCond %{HTTPS} off -RewriteRule ^(.*)$ https://%{HTTP_HOST}$1 [R=301,L] - -# Security headers - - Header set X-Frame-Options "SAMEORIGIN" - Header set X-Content-Type-Options "nosniff" - Header set X-XSS-Protection "1; mode=block" - Header set Referrer-Policy "strict-origin-when-cross-origin" - - -``` - -### Step 5: Setup PM2 Process Management (SSH) - -```bash -ssh traddhou@199.188.201.164 - -cd /home/traddhou/public_html - -# Install PM2 -npm install -g pm2 - -# Create ecosystem config -cat > ecosystem.config.js << 'EOF' -module.exports = { - apps: [{ - name: 'tradehax', - script: './.next/standalone/server.js', - instances: 1, - exec_mode: 'cluster', - watch: false, - max_memory_restart: '500M', - env: { - NODE_ENV: 'production', - PORT: 3000, - }, - error_file: 'logs/err.log', - out_file: 'logs/out.log', - }], -}; -EOF - -# Create logs directory -mkdir -p logs - -# Start with PM2 -pm2 start ecosystem.config.js - -# Save and enable startup -pm2 save -pm2 startup -``` - -### Step 6: Verify Deployment - -```bash -# Check status -pm2 status - -# View logs -pm2 logs tradehax - -# Monitor resources -pm2 monit - -# Test application -curl https://tradehax.net -``` - -Visit: **https://tradehax.net** -- Should load in ~30 seconds (first time) -- Should show HTTPS lock πŸ”’ -- No errors in browser console - ---- - -## Testing Endpoints - -### Test Text Generation - -```bash -curl -X POST https://tradehax.net/api/hf-server \ - -H "Content-Type: application/json" \ - -d '{ - "prompt": "Give me a concise BTC/ETH market brief.", - "task": "text-generation" - }' -``` - -Expected response: -```json -{ - "output": [ - { - "generated_text": "..." - } - ] -} -``` - -### Test Image Generation - -```bash -curl -X POST https://tradehax.net/api/hf-server \ - -H "Content-Type: application/json" \ - -d '{ - "prompt": "Trading chart with candlestick pattern", - "task": "image-generation" - }' -``` - -Expected: Image blob returned - ---- - -## Monitoring & Logs - -### Via SSH - -```bash -# Stream logs -pm2 logs tradehax - -# View specific number of lines -pm2 logs tradehax --lines 100 - -# Monitor resources -pm2 monit - -# Get process info -pm2 info tradehax - -# Check if running -pm2 status -``` - -### Via cPanel - -1. **cPanel β†’ Metrics:** - - CPU Usage - - Memory Usage - - Bandwidth - - Errors - -2. 
**cPanel β†’ Error Log:** - - Tail errors - - Search for issues - ---- - -## Environment Variables Reference - -| Variable | Value | Required | Notes | -|----------|-------|----------|-------| -| `NODE_ENV` | `production` | βœ… | Must be production | -| `PORT` | `3000` | βœ… | Default Node port | -| `NEXT_PUBLIC_SITE_URL` | `https://tradehax.net` | βœ… | For NextAuth | -| `HF_API_TOKEN` | `hf_xxx...` | βœ… | From HF Hub | -| `HF_MODEL_ID` | `mistralai/Mistral-7B-Instruct-v0.1` | βœ… | Base model | -| `HF_IMAGE_MODEL_ID` | `stabilityai/stable-diffusion-2-1` | βœ… | Image model | -| `NEXT_PUBLIC_ENABLE_PAYMENTS` | `true` | βœ… | Enable monetization | -| `STRIPE_SECRET_KEY` | `sk_live_xxx` | ⚠️ | If using Stripe | -| `NEXTAUTH_SECRET` | Random 32-byte string | βœ… | Security key | -| `NEXTAUTH_URL` | `https://tradehax.net` | βœ… | Auth URL | -| `DATABASE_URL` | Connection string | ⚠️ | If using database | - ---- - -## Troubleshooting - -### Application Won't Start - -```bash -# Check logs -pm2 logs tradehax - -# Check file exists -ls -la .next/standalone/server.js - -# Restart -pm2 restart tradehax - -# Force rebuild -npm run build -pm2 restart tradehax -``` - -### 502 Bad Gateway - -```bash -# Check if Node app is running -pm2 status - -# Check Apache mod_proxy -# cPanel > Apache Modules > search "proxy" - -# Verify .htaccess proxy rules -cat .htaccess - -# Test direct connection -curl http://127.0.0.1:3000 -``` - -### HTTPS Not Working - -```bash -# Check certificate -# cPanel > SSL/TLS > Status should be "ACTIVE" - -# Renew if needed -# cPanel > SSL/TLS > AutoSSL > Check - -# Force HTTPS redirect should be in .htaccess -grep "Force HTTPS" .htaccess -``` - -### HF API Token Error - -```bash -# Verify token is set -pm2 env tradehax | grep HF_API_TOKEN - -# Test token validity -curl -H "Authorization: Bearer $HF_API_TOKEN" \ - https://huggingface.co/api/whoami - -# Get new token from -# https://huggingface.co/settings/tokens - -# Update in cPanel and restart -pm2 restart tradehax -``` - -### High Memory Usage - -```bash -# Check memory -pm2 monit - -# Restart with memory limit -# Edit ecosystem.config.js: max_memory_restart: '300M' -pm2 restart tradehax - -# Set cron job to restart daily -crontab -e -# Add: 0 2 * * * pm2 restart tradehax -``` - ---- - -## Maintenance - -### Daily - -- Check: `pm2 status tradehax` -- Monitor: `pm2 logs tradehax` for errors - -### Weekly - -- Review: cPanel > Metrics > CPU/Memory -- Check: API response times -- Verify: HTTPS certificate valid - -### Monthly - -- Update dependencies: `npm update` -- Review: Application logs -- Backup: cPanel > Backups - -### Quarterly - -- Test failover: Restart app -- Review: Security settings -- Optimize: Performance settings - ---- - -## Scaling Considerations - -### Current Setup Limitations - -- Single instance on shared hosting -- Limited CPU/Memory -- Suitable for: < 1000 daily users -- HF inference: Rate limited by free tier - -### When to Upgrade - -**Upgrade to VPS if:** -- Consistent high CPU usage (> 80%) -- Memory frequently > 400MB -- Response times > 2 seconds -- More than 5000 daily users - -**Recommended VPS Setup:** -- Node: 4+ vCPU -- Memory: 4-8 GB -- Storage: 50GB SSD -- Dedicated database - ---- - -## Rollback Procedure - -If deployment fails: - -```bash -ssh traddhou@199.188.201.164 - -cd /home/traddhou/public_html - -# Stop current app -pm2 stop tradehax - -# Restore backup -rm -rf .next node_modules package-lock.json -mv main.backup.[timestamp] main -# OR -git checkout HEAD -- . 
- -# Rebuild -npm install --production -npm run build - -# Start -pm2 start tradehax -``` - ---- - -## Support & Contact - -**For deployment issues:** -- Email: darkmodder33@proton.me -- GitHub: https://github.com/DarkModder33/main - -**Helpful Commands:** - -```bash -# Generate full deployment guide -node scripts/namecheap-cpanel-deployment.js - -# SSH quick connect -ssh traddhou@199.188.201.164 - -# Quick diagnostics -pm2 status -pm2 logs tradehax --lines 50 -pm2 monit - -# Restart everything -pm2 restart tradehax -pm2 save -``` - ---- - -## Success Criteria - -βœ… **Deployment Complete When:** - -- [ ] Application loads at https://tradehax.net -- [ ] HTTPS certificate valid (πŸ”’ showing) -- [ ] Response time < 3 seconds -- [ ] `/api/hf-server` returns 200 -- [ ] Text generation works -- [ ] Image generation works -- [ ] cPanel metrics show normal usage -- [ ] pm2 logs show no errors -- [ ] Payments flag enabled - ---- - -**Status:** πŸš€ Ready to Deploy - -**Latest:** See `scripts/namecheap-cpanel-deployment.js` for full automation - -**Time to Live:** 10-30 minutes (depending on method) diff --git a/NAMECHEAP_MIGRATION_CHECKLIST.md b/NAMECHEAP_MIGRATION_CHECKLIST.md deleted file mode 100644 index 09d557b6..00000000 --- a/NAMECHEAP_MIGRATION_CHECKLIST.md +++ /dev/null @@ -1,80 +0,0 @@ -# Namecheap Migration Checklist (What You Still Need To Complete) - -This checklist is the minimum remaining manual work after automation scaffolding. - -## 1) GitHub Secrets (required) - -In **GitHub β†’ Settings β†’ Secrets and variables β†’ Actions**, add: - -- `NAMECHEAP_VPS_HOST` -- `NAMECHEAP_VPS_USER` -- `NAMECHEAP_VPS_SSH_KEY` - -Optional but recommended: - -- `NAMECHEAP_VPS_PORT` (default 22) -- `NAMECHEAP_APP_ROOT` (default `/var/www/tradehax`) -- `NAMECHEAP_APP_PORT` (default `3000`) - -## 2) Server bootstrap (one-time) - -On the VPS: - -1. Clone or upload repo once. -2. Run: `bash deploy/namecheap/bootstrap-server.sh` -3. Copy `deploy/namecheap/nginx.tradehax.conf` into `/etc/nginx/sites-available/tradehax.conf` -4. Enable site and restart Nginx. -5. Install certificates (certbot) for: - - tradehax.net - - www.tradehax.net - - tradehaxai.tech - - www.tradehaxai.tech - - tradehaxai.me - - www.tradehaxai.me -6. Create server env file from `deploy/namecheap/env.production.example`: - - `/var/www/tradehax/shared/.env.production` - -## 3) DNS cutover at Namecheap - -Set records: - -- `A` record `@` β†’ **your VPS public IP** -- `CNAME` `www` β†’ `tradehax.net` - -For `tradehaxai.tech` and `tradehaxai.me`, either: -- point both to same VPS, or -- URL-redirect to canonical `tradehax.net` - -Use TTL = 300 during cutover. - -## 4) Cron replacement (replaces Vercel cron) - -Run on VPS after app is live: - -```bash -BASE_URL=https://tradehax.net TRADEHAX_CRON_SECRET='' bash deploy/namecheap/setup-cron.sh -``` - -## 5) Trigger deploy - -- Push to `main`, or run GitHub workflow manually: **Deploy to Namecheap VPS**. -- Validate: - - Home page loads - - Health endpoint responds - - Auth and key API routes function - -## 6) Disable Vercel remnants - -Already automated in repo: -- Vercel deploy workflow no longer auto-runs. 
- -Still manual in Vercel dashboard: -- Remove custom domains -- Remove/deactivate project -- Remove billing method if no longer needed - -## 7) Final verification - -- `https://tradehax.net` returns 200 and app content -- cron endpoints receive successful entries in `/var/log/tradehax-cron.log` -- GitHub deploy workflow succeeds end-to-end diff --git a/PERMISSIVE_CONFIG.md b/PERMISSIVE_CONFIG.md deleted file mode 100644 index 5c6fe375..00000000 --- a/PERMISSIVE_CONFIG.md +++ /dev/null @@ -1,129 +0,0 @@ -# Permissive Development Configuration - -This repository has been configured with the most permissive settings for maximum development flexibility and automation. - -## What Has Been Configured - -### 1. **VS Code Settings** (`.vscode/settings.json`) -- βœ… Copilot & AI features enabled for all file types -- βœ… Auto-save enabled (1 second delay) -- βœ… Format on save and paste enabled -- βœ… Auto-import and code actions on save -- βœ… Git auto-stash, auto-fetch, no confirmations -- βœ… Force push allowed -- βœ… Workspace trust disabled (no security warnings) -- βœ… Terminal confirmations disabled - -### 2. **VS Code Extensions** (`.vscode/extensions.json`) -- βœ… All recommended extensions defined -- βœ… GitHub Copilot & Copilot Chat -- βœ… ESLint, Prettier, GitLens -- βœ… React, TypeScript, Tailwind CSS support -- βœ… Solana development tools - -### 3. **TypeScript Configuration** (`tsconfig.json`) -- βœ… Strict mode disabled -- βœ… All type checking made optional -- βœ… Unused variables/parameters allowed -- βœ… Implicit any allowed -- βœ… Unreachable code allowed - -### 4. **Next.js Configuration** (`next.config.ts`) -- βœ… React Strict Mode disabled -- βœ… ESLint ignored during builds -- βœ… TypeScript errors ignored during builds -- βœ… All experimental features enabled -- βœ… Remote image patterns allow all HTTPS sources -- βœ… Webpack fallbacks configured - -### 5. **ESLint Configuration** (`eslint.config.mjs`) -- βœ… All strict rules disabled -- βœ… TypeScript any type allowed -- βœ… Unused variables allowed -- βœ… Console logs allowed -- βœ… React hooks warnings only (not errors) - -### 6. **Git Configuration** -- βœ… Git hooks disabled (`core.hooksPath=/dev/null`) -- βœ… No pre-commit validation -- βœ… No commit message validation -- βœ… Automatic line ending handling (`.gitattributes`) - -### 7. **Editor Configuration** (`.editorconfig`) -- βœ… Consistent indentation (2 spaces) -- βœ… UTF-8 encoding -- βœ… LF line endings -- βœ… Trim trailing whitespace - -### 8. 
**NPM Scripts** (`package.json`) -- βœ… `npm run lint:fix` - Auto-fix linting issues -- βœ… `npm run clean` - Clean build artifacts -- βœ… `npm run reinstall` - Fresh install of dependencies -- βœ… `npm run deploy:build` - Clean build for deployment - -## Benefits - -### πŸš€ **Maximum Automation** -- Auto-save, auto-format, auto-fix on save -- No manual intervention required for code style -- Automatic imports organization - -### πŸ”“ **Permissive Development** -- No TypeScript strict checking blocking development -- ESLint won't block commits or builds -- Git hooks won't interrupt workflow -- Force push enabled for easy history management - -### πŸ› οΈ **IDE Integration** -- All recommended extensions auto-suggested -- Copilot enabled everywhere for AI assistance -- GitLens for enhanced git visibility -- Tailwind CSS IntelliSense - -### ⚑ **Fast Iteration** -- Turbopack for faster dev builds -- No unnecessary validation slowing you down -- Build errors won't stop the build process - -## Usage - -### Development -```bash -npm run dev # Start development server with Turbopack -npm run lint:fix # Fix all auto-fixable linting issues -npm run type-check # Check types without blocking -``` - -### Production -```bash -npm run build # Build for production (ignores errors) -npm run deploy:build # Clean build for deployment -``` - -### Maintenance -```bash -npm run clean # Clean build artifacts -npm run reinstall # Fresh install of dependencies -``` - -## Security Note - -⚠️ **Important**: These permissive settings are designed for rapid development and iteration. Consider re-enabling some safety checks before deploying to production: - -- TypeScript strict mode -- ESLint error-level rules -- Git hooks for validation -- React Strict Mode - -## Customization - -All settings can be adjusted in their respective configuration files: -- VS Code: `.vscode/settings.json` -- TypeScript: `tsconfig.json` -- Next.js: `next.config.ts` -- ESLint: `eslint.config.mjs` -- Git: `.gitattributes`, `.gitconfig` - ---- - -Happy coding! πŸŽ‰ diff --git a/PIPELINE_QUICKSTART.md b/PIPELINE_QUICKSTART.md deleted file mode 100644 index c826dcf9..00000000 --- a/PIPELINE_QUICKSTART.md +++ /dev/null @@ -1,32 +0,0 @@ -# TradeHax Local Pipeline Quickstart - -A clean, repeatable local workflow from repository root (`c:\tradez\main`). - -## Core Commands - -- `npm run hf:automation` - Runs the full automation orchestrator (validation + setup guidance). - -- `npm run hf:validate` - Runs deployment/environment validation checks. - -- `npm run hf:pipeline` - Runs `hf:validate` + code quality checks (`lint` + `type-check`). - -- `npm run hf:pipeline:strict` - Runs `hf:pipeline` + production build (`next build`). - -- `npm run hf:test-inference` - Runs inference endpoint tests (supports `TRADEHAX_TEST_BASE_URL`). - -## Suggested Daily Flow - -1. `npm run hf:pipeline` -2. If release day: `npm run hf:pipeline:strict` -3. If deploying model/API changes: `npm run hf:test-inference` - -## Notes - -- If your active deployment domain is not `tradehax.net`, set: - - `TRADEHAX_TEST_BASE_URL=https://` -- Validation warnings for unset local secrets are expected on fresh environments. diff --git a/QUICK_START.md b/QUICK_START.md deleted file mode 100644 index 1cd481c2..00000000 --- a/QUICK_START.md +++ /dev/null @@ -1,230 +0,0 @@ -# Quick Start Guide - Astral Awakening: TradeHax - -Get up and running in 15 minutes (after SHAMROCK token setup). - -## ⚑ 5-Minute Backend Setup - -### 1. 
Install and Deploy -```bash -cd tradehax-backend -npm install - -# Test locally -npm run dev -# Visit http://localhost:3001/api/health -``` - -### 2. Deploy to Vercel -```bash -npm install -g vercel -vercel login -vercel --prod -``` - -### 3. Add Env Variables in Vercel Dashboard -- `SHAMROCK_MINT` - Your token mint pubkey -- `AUTHORITY_SECRET` - Authority keypair array -- `SOLANA_RPC` - `https://api.devnet.solana.com` -- (Optional) MongoDB, Twitter API keys - -**Save your backend URL** (e.g., `https://your-app.vercel.app`) - ---- - -## ⚑ 5-Minute Frontend Setup - -### 1. Install -```bash -cd tradehax-frontend -npm install -``` - -### 2. Configure -```bash -cp .env.example .env -# Edit .env with your backend URL -VITE_BACKEND_URL=https://your-backend.vercel.app -``` - -### 3. Test Locally -```bash -npm run dev -# Visit http://localhost:5173 -``` -- Connect wallet (use Phantom) -- Try collecting clovers (WASD to move) -- Test tweet reward - -### 4. Deploy to GitHub Pages -```bash -npm run build -# Copy dist/ to your hosting or use: -vercel --prod -``` - ---- - -## ⚑ 5-Minute SHAMROCK Token Setup (if you haven't done this yet) - -### Quick Version -```bash -# 1. Install Solana CLI -sh -c "$(curl -sSfL https://release.solana.com/v1.18.22/install)" - -# 2. Set to devnet -solana config set --url https://api.devnet.solana.com - -# 3. Create authority wallet -solana-keygen new --outfile ~/my-wallets/authority-keypair.json - -# 4. Airdrop SOL -solana airdrop 2 ~/my-wallets/authority-keypair.json --url devnet - -# 5. Create mint -solana-keygen new --outfile mint-keypair.json -MINT_PUBKEY=$(solana-keygen pubkey mint-keypair.json) - -# 6. Get rent cost -RENT=$(solana rent 82 --url devnet | grep "Minimum balance" | awk '{print $4}') - -# 7. Fund and initialize -solana transfer ~/my-wallets/authority-keypair.json $MINT_PUBKEY $RENT \ - --allow-unfunded-recipient --signer mint-keypair.json --url devnet - -spl-token initialize-mint mint-keypair.json \ - --decimals 9 \ - --mint-authority ~/my-wallets/authority-keypair.json \ - --url devnet - -# 8. Create account and mint supply -TOKEN_ACCOUNT=$(spl-token create-account $MINT_PUBKEY \ - --owner ~/my-wallets/authority-keypair.json --url devnet | tail -1) - -spl-token mint $MINT_PUBKEY 1000000000000000 $TOKEN_ACCOUNT --url devnet - -# 9. Save these: -echo "SHAMROCK_MINT=$MINT_PUBKEY" -cat ~/my-wallets/authority-keypair.json # For AUTHORITY_SECRET -``` - -See [SHAMROCK_SETUP.md](./SHAMROCK_SETUP.md) for detailed instructions. - ---- - -## 🎠Test the Game - -### 1. Connect Wallet -- Click "Connect Phantom" button -- Approve in Phantom extension - -### 2. Play -- Use **W/A/S/D** to move -- Collect **5 clovers** (glowing purple circles) -- Reach **100 energy** to unlock portal - -### 3. Earn SHAMROCK -- Click "Tweet Quest: Earn 100 SHAMROCK" -- Post to X (Twitter) with `#HyperboreaAscent` -- Paste your tweet URL -- Get 100 tokens instantly! - -### 4. Mint NFT Skins (New!) -- Click **"🎨 Mint NFT Skin"** button -- Browse 5 exclusive deity-themed skins -- Click to preview details (god/goddess, element, rarity) -- Click **"✨ Mint for 10 SHAMROCK"** -- Confirm transaction in wallet -- NFT appears in wallet (10-30 seconds) -- View on Solana Explorer -- Transfer on NFT marketplaces! 
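-
-### 5. Verify On-Chain (Optional)
-If you want to confirm rewards and mints on-chain rather than trusting the UI, the stock Solana CLI can query the mint directly. This is a minimal sketch, assuming `$MINT_PUBKEY` is still exported from the token setup above and `<YOUR_WALLET_ADDRESS>` is the wallet you connected in-game; exact flags can vary slightly between `spl-token` CLI versions.
-
-```bash
-# Total SHAMROCK supply minted so far
-spl-token supply $MINT_PUBKEY --url devnet
-
-# SHAMROCK balance held by your game wallet
-spl-token balance $MINT_PUBKEY --owner <YOUR_WALLET_ADDRESS> --url devnet
-
-# All token accounts for the wallet (SHAMROCK plus any minted NFT skins)
-spl-token accounts --owner <YOUR_WALLET_ADDRESS> --url devnet
-```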
- ---- - -## πŸ“‹ Deployment Checklist - -- [ ] SHAMROCK token created on devnet -- [ ] Backend deployed to Vercel -- [ ] Backend environment variables set (including Metaplex for NFTs) -- [ ] Frontend `.env` configured with backend URL -- [ ] Frontend deployed (GitHub Pages or Vercel) -- [ ] Wallet connects successfully -- [ ] Clovers collect and grant energy -- [ ] Tweet reward system works (100 SHAMROCK per tweet) -- [ ] NFT mint panel opens and displays skins -- [ ] Can mint NFT and burn 10 SHAMROCK -- [ ] Minted NFT appears in wallet -- [ ] PWA installable on mobile - ---- - -## πŸ› Quick Troubleshooting - -### Backend won't deploy -```bash -# Check if everything is there -ls tradehax-backend/ -# Should have: package.json, api/index.js, vercel.json -``` - -### Frontend won't connect to backend -```bash -# Test backend is working -curl https://your-backend.vercel.app/api/health -# Should return: {"status":"ok",...} - -# Check .env file -cat tradehax-frontend/.env -# Should have correct VITE_BACKEND_URL -``` - -### Wallet won't connect -- Install Phantom: https://phantom.app -- Make sure you're on devnet in Phantom -- Try refreshing the page - -### Rewards not working -- Verify tweet contains `#HyperboreaAscent` -- Check backend logs in Vercel -- Confirm SHAMROCK_MINT is correct -- Check authority wallet has SOL - ---- - -## πŸ“± Mobile/PWA Setup - -1. Open game on mobile browser -2. Tap share β†’ "Add to Home Screen" -3. Game is now installed as app -4. Works offline (with cached assets) - ---- - -## πŸš€ Next Steps - -1. **Customize**: Edit colors, game mechanics, rewards in code -2. **Launch**: Share #HyperboreaAscent link -3. **Promote**: Tweet about game with rewards -4. **Scale**: Add more features and game modes -5. **Monetize**: Add in-app purchases, NFT cosmetics -6. **Mainnet**: Switch SHAMROCK to mainnet after testing - ---- - -## πŸ“š Documentation - -- **Detailed Setup**: [SHAMROCK_SETUP.md](./SHAMROCK_SETUP.md) -- **Backend Docs**: [tradehax-backend/README.md](./tradehax-backend/README.md) -- **Frontend Docs**: [tradehax-frontend/README.md](./tradehax-frontend/README.md) - ---- - -## πŸ’¬ Get Help - -- Check console errors: F12 β†’ Console tab -- Read error messages carefully -- See troubleshooting in full docs -- Ask in Discord community - ---- - -**You've got this! πŸ€** diff --git a/QUICK_VISUAL_FIX.md b/QUICK_VISUAL_FIX.md deleted file mode 100644 index 16efb7d6..00000000 --- a/QUICK_VISUAL_FIX.md +++ /dev/null @@ -1,251 +0,0 @@ -# 🎯 Quick Visual Guide: Fix Vercel Deployment in 3 Steps - -**Time Required:** 3 minutes -**Skill Level:** Beginner -**Impact:** Fixes critical deployment failure - ---- - -## Before You Start - -### Check if This Fix Applies to You - -**You should use this guide if you see:** -``` -❌ Error: ENOENT: no such file or directory, open '/vercel/path0/package.json' -❌ Command "npm install" exited with 254 -``` - -**In your Vercel deployment logs at:** https://vercel.com/dashboard - ---- - -## πŸ”§ Step 1: Change Production Branch (1 minute) - -### Visual Path: -``` -Vercel Dashboard - ↓ -Your Project (click it) - ↓ -βš™οΈ Settings (top menu) - ↓ -🌿 Git (left sidebar) - ↓ -πŸ“ Production Branch (find this section) -``` - -### What You'll See: -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Production Branch β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ gh-pages β–Ό β”‚ β”‚ ← WRONG! 
-β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ -β”‚ [Save] β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -### What to Change: -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Production Branch β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ main β–Ό β”‚ β”‚ ← CORRECT! -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ -β”‚ [Save] ← CLICK THIS β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -### Action: -1. Click the dropdown that says "gh-pages" -2. Select "main" -3. Click the "Save" button - -βœ… **Checkpoint:** Settings saved successfully - ---- - -## πŸš€ Step 2: Redeploy (1 minute) - -### Visual Path: -``` -Vercel Dashboard - ↓ -Your Project - ↓ -πŸ“¦ Deployments (top menu) - ↓ -Latest deployment (top of list, shows "Failed") - ↓ -β‹― (three dots menu button) - ↓ -πŸ”„ Redeploy (click it) -``` - -### What You'll See: -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Redeploy to Production β”‚ -β”‚ β”‚ -β”‚ ☐ Use existing Build Cache β”‚ ← UNCHECK THIS! -β”‚ β”‚ -β”‚ [Cancel] [Redeploy] β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -### Action: -1. Make sure "Use existing Build Cache" is **UNCHECKED** (empty checkbox) -2. Click the blue "Redeploy" button - -βœ… **Checkpoint:** Build is running (you'll see a progress indicator) - ---- - -## ⏱️ Step 3: Wait & Verify (1-2 minutes) - -### During Build (30-120 seconds): - -You'll see this: -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ πŸ”¨ Building β”‚ -β”‚ ━━━━━━━━━━━━━━━━━━━░░░░ 75% β”‚ -β”‚ β”‚ -β”‚ Running "npm install"... β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -**What's Different Now:** -- βœ… npm install will find package.json (from main branch) -- βœ… Build will complete successfully -- βœ… Site will deploy - -### After Build Completes: - -Success looks like this: -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ βœ… Ready β”‚ -β”‚ Production: tradehaxai.tech β”‚ -β”‚ Domain: tradehaxai.tech β”‚ -β”‚ Build Time: 2m 15s β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -### Final Verification: - -1. **Visit your site:** https://tradehaxai.tech -2. **Check for:** - - βœ… Site loads (no errors) - - βœ… Green padlock in browser (HTTPS working) - - βœ… Images and content visible - - βœ… Navigation works - -βœ… **SUCCESS!** Your site is now live. - ---- - -## πŸŽ‰ What Just Happened? - -### Before Fix: -``` -Vercel tries to build from gh-pages branch: -gh-pages/ -β”œβ”€β”€ index.html ← Already built HTML -β”œβ”€β”€ _next/ ← Already built JS -β”œβ”€β”€ 404.html -❌ NO package.json ← Vercel can't find this! 
-❌ NO source code -``` - -**Result:** Build fails because Vercel can't run `npm install` - -### After Fix: -``` -Vercel builds from main branch: -main/ -β”œβ”€β”€ package.json βœ… Found! -β”œβ”€β”€ app/ βœ… Source code -β”œβ”€β”€ components/ βœ… React components -β”œβ”€β”€ next.config.ts βœ… Config -└── vercel.json βœ… Deploy settings -``` - -**Result:** Build succeeds, site deploys - ---- - -## πŸ†˜ Still Having Issues? - -### If Build Still Fails: - -**Check the error message in Vercel logs:** - -#### Error: Missing GitHub Secrets -``` -Error: missing required VERCEL_TOKEN -``` -**Fix:** See [GITHUB_SECRETS_SETUP.md](./GITHUB_SECRETS_SETUP.md) - -#### Error: Build Command Failed -``` -Error: Command "npm run build" failed -``` -**Fix:** Test locally: `npm run build` - -#### Error: Environment Variables -``` -Error: NEXT_PUBLIC_XXX is not defined -``` -**Fix:** Add in Vercel Dashboard β†’ Settings β†’ Environment Variables - -### If Site Doesn't Load: - -**Check DNS:** -```bash -nslookup tradehaxai.tech -# Should return: 76.76.21.21 -``` - -**If wrong IP:** See [VERCEL_DOMAIN_SETUP.md](./VERCEL_DOMAIN_SETUP.md) - -### If You See Old Version: - -**Clear Cache:** -- Press: `Ctrl + Shift + R` (Windows/Linux) -- Press: `Cmd + Shift + R` (Mac) - ---- - -## πŸ“š More Resources - -| Document | When to Use | -|----------|-------------| -| [DEPLOYMENT_FIX_CHECKLIST.md](./DEPLOYMENT_FIX_CHECKLIST.md) | Systematic troubleshooting | -| [VERCEL_DEPLOYMENT_TROUBLESHOOTING.md](./VERCEL_DEPLOYMENT_TROUBLESHOOTING.md) | Comprehensive error guide | -| [VERCEL_BRANCH_FIX.md](./VERCEL_BRANCH_FIX.md) | Detailed explanation | - ---- - -## βœ… Success Checklist - -After following this guide, you should have: - -- [x] Changed production branch to `main` in Vercel -- [x] Redeployed without build cache -- [x] Verified build shows "Ready" status -- [x] Confirmed site loads at https://tradehaxai.tech -- [x] Checked HTTPS padlock is present -- [x] Tested that pages navigate correctly - ---- - -**Created:** 2026-02-08 -**For:** Fixing "Could not read package.json" deployment error -**Estimated Time:** 3 minutes -**Success Rate:** 99% (if followed correctly) - -🎯 **This is the simplest possible fix - no coding required!** diff --git a/SETUP_VERIFICATION.md b/SETUP_VERIFICATION.md deleted file mode 100644 index 0c9b2271..00000000 --- a/SETUP_VERIFICATION.md +++ /dev/null @@ -1,207 +0,0 @@ -# TradeHax HF Fine-Tuning Setup Verification - -## βœ… Files Status - -All required files are in place: - -### API & Client Files - -- [x] `app/api/hf-server/route.ts` β€” Server-side HF Inference endpoint -- [x] `components/hf-client.ts` β€” React hook for client API calls - -### Fine-Tuning Scripts -- [x] `scripts/fine-tune-mistral-lora.py` β€” Mistral-7B LoRA trainer -- [x] `scripts/fine-tune-requirements.txt` β€” Python dependencies (pinned) -- [x] `scripts/run-finetune-workflow.js` β€” Node.js orchestrator - -### Documentation & Config -- [x] `docs/HF_FINE_TUNING_WORKFLOW.md` β€” Full workflow guide -- [x] `.env.example` β€” Environment variable template - -### npm Scripts (in package.json) -- [x] `npm run llm:finetune:deps` β€” Install Python dependencies -- [x] `npm run llm:finetune` β€” Run fine-tuning directly -- [x] `npm run llm:finetune:push` β€” Fine-tune + push to Hub -- [x] `npm run llm:finetune:workflow` β€” Orchestrated fine-tuning -- [x] `npm run llm:finetune:workflow:push` β€” Orchestrated + Hub push - -## πŸ“‹ Setup Checklist - -### Step 1: Clone & Navigate -```bash -git clone 
https://github.com/DarkModder33/main.git -cd main -``` -βœ… **Status:** Repository cloned with commit 29b8ee9 - -### Step 2: Install Node Dependencies -```bash -npm install -``` -Required: `@huggingface/inference` (already in package.json) - -### Step 3: Install Python Dependencies -```bash -npm run llm:finetune:deps -``` -This installs: -- transformers 4.38.2 -- datasets 2.17.1 -- peft 0.9.0 -- accelerate 0.27.2 -- bitsandbytes 0.42.0 -- evaluate 0.4.1 -- huggingface-hub 0.21.3 -- torch 2.2.1 - -### Step 4: Configure Environment -Create `.env.local` (copy from `.env.example`): -```bash -HF_API_TOKEN=hf_YOUR_TOKEN_HERE -HF_MODEL_ID=mistralai/Mistral-7B-Instruct-v0.1 -HF_HUB_MODEL_ID=your-org/tradehax-mistral-finetuned -DATASET_PATH=data/custom-llm/tradehax-training-expanded.jsonl -TRAIN_EPOCHS=3 -TRAIN_BATCH_SIZE=4 -TRAIN_LR=2e-5 -LORA_R=16 -LORA_ALPHA=32 -``` - -**Getting HF_API_TOKEN:** -1. Visit https://huggingface.co/settings/tokens -2. Create new token (write access) -3. Add to `.env.local` - -### Step 5: Prepare Training Data -Ensure `data/custom-llm/tradehax-training-expanded.jsonl` exists with format: -```json -{"text": "Your training text here"} -``` - -### Step 6: Run Fine-Tuning - -**Option A: Direct Python** -```bash -npm run llm:finetune -``` - -**Option B: With Hub Push** -```bash -npm run llm:finetune:push -``` - -**Option C: Node Orchestrator** -```bash -npm run llm:finetune:workflow -``` - -**Option D: Orchestrator + Hub Push** -```bash -npm run llm:finetune:workflow:push -``` - -### Step 7: Verify Training Output -After training completes: -- Check `./fine-tuned-tradehax-mistral/` directory -- Verify model uploaded to Hub: - -### Step 8: Test API Endpoint - -**Local Test (curl):** -```bash -curl -X POST http://localhost:3000/api/hf-server \ - -H "Content-Type: application/json" \ - -d '{"prompt": "Generate a trading strategy", "task": "text-generation"}' -``` - -**Frontend Test:** -Import in React component: -```typescript -import { useHfClient } from '@/components/hf-client'; - -export function MyComponent() { - const { callHfApi, loading, error } = useHfClient(); - - const handleGenerate = async () => { - const result = await callHfApi('Your prompt', 'text-generation'); - console.log(result); - }; - - return ; -} -``` - -### Step 9: Deploy to Vercel - -1. Update Vercel environment variables: - - Add `HF_API_TOKEN` (keep private) - - Add other HF_* variables from `.env.example` - -2. Deploy: -```bash -git push origin main -``` -(Auto-deploys to Vercel) - -3. Test live endpoint: -```bash -curl -X POST https://tradehax.net/api/hf-server \ - -H "Content-Type: application/json" \ - -d '{"prompt": "Trading bot signals", "task": "text-generation"}' -``` - -## πŸ§ͺ Testing Matrix - -| Component | Local | Vercel | Status | -|-----------|-------|--------|--------| -| API endpoint | βœ… | βœ… | Ready | -| Client hook | βœ… | βœ… | Ready | -| Fine-tuning script | ⏳ | - | Install deps first | -| Model upload | ⏳ | - | Requires HF token | -| TypeScript types | βœ… | βœ… | All passing | -| npm scripts | βœ… | βœ… | All defined | - -## πŸ“Š Current Configuration - -```yaml -Base Model: mistralai/Mistral-7B-Instruct-v0.1 -LoRA Config: - r: 16 - alpha: 32 - target_modules: [q_proj, v_proj] -Training: - epochs: 3 - batch_size: 4 - learning_rate: 2e-5 - max_length: 512 -Output Hub: your-org/tradehax-mistral-finetuned -``` - -## πŸš€ Next Steps - -1. **Prepare Dataset**: Create or upload `data/custom-llm/tradehax-training-expanded.jsonl` -2. 
**Run Fine-Tuning**: `npm run llm:finetune:workflow:push` -3. **Test API**: Call `/api/hf-server` with sample prompt -4. **Deploy**: Push to GitHub β†’ Auto-deploy to Vercel -5. **Monitor**: Check Hub for model metrics & training logs - -## πŸ†˜ Troubleshooting - -| Issue | Solution | -|-------|----------| -| `HF_API_TOKEN not found` | Set in `.env.local`: `export HF_API_TOKEN=hf_...` | -| `Dataset not found` | Check path: `ls data/custom-llm/tradehax-training-expanded.jsonl` | -| `CUDA out of memory` | Reduce `TRAIN_BATCH_SIZE` or use `load_in_4bit=True` (already enabled) | -| `API returns 500` | Check server logs: `npm run dev` and inspect `/api/hf-server` | -| `Type errors` | Run `npm run type-check` and fix imports | - -## πŸ“ž Support - -For setup issues or consultations: **darkmodder33@proton.me** - ---- - -**Last Updated:** 2026-02-24 -**Commit:** 29b8ee9 -**Status:** βœ… Ready to deploy diff --git a/TESTING_GUIDE.md b/TESTING_GUIDE.md deleted file mode 100644 index 958f7bab..00000000 --- a/TESTING_GUIDE.md +++ /dev/null @@ -1,347 +0,0 @@ -# Quick Start Guide: Testing & Deployment - -**For Developers and Testers** - -## πŸš€ Quick Setup (5 minutes) - -### Step 1: Get WalletConnect Project ID (Optional) - -**Note:** A default Reown (WalletConnect) Project ID is already configured. To use your own: - -1. Go to https://dashboard.reown.com (formerly WalletConnect Cloud) -2. Sign up/login (free account) -3. Click "Create Project" -4. Copy your Project ID (looks like: `abc123def456...`) - -### Step 2: Configure Environment - -```bash -cd tradehax-frontend - -# Create .env if it doesn't exist -cp .env.example .env - -# The default Project ID is already in .env.example -# To use your own, edit .env -nano .env # or use your favorite editor -``` - -The `.env` file will have: -``` -VITE_WALLETCONNECT_PROJECT_ID=79b6b869e8bc24644ece855d8edbe246 -``` - -### Step 3: Install and Build - -```bash -# Install dependencies (if not done) -npm install - -# Build for production -npm run build - -# Or run in development mode -npm run dev -``` - -## πŸ“± Testing Checklist - -### Desktop Testing (Chrome/Firefox/Edge) - -**Phantom Wallet:** -- [ ] Click wallet button -- [ ] Select Phantom -- [ ] Wallet extension opens -- [ ] Approve connection -- [ ] Wallet address shown in HUD -- [ ] Play game, collect items -- [ ] Game state persists on refresh - -**Solflare Wallet:** -- [ ] Same as Phantom above - -**WalletConnect (Trust Wallet/MetaMask):** -- [ ] Click wallet button -- [ ] Select "WalletConnect" -- [ ] QR code modal appears -- [ ] Scan with mobile wallet app -- [ ] Approve connection -- [ ] Wallet address shown in HUD -- [ ] Play game, verify transactions - -**Web5 DID:** -- [ ] Check HUD shows `πŸ†” DID: did:dht:...` -- [ ] Check HUD shows `βœ“ Web5 Connected` -- [ ] Play game, collect items -- [ ] Refresh browser -- [ ] Game state should load from Web5 -- [ ] Try incognito mode (should use localStorage fallback) - -### Mobile Testing (iOS Safari) - -**Setup:** -1. Open Safari on iPhone -2. Navigate to your test URL -3. 
Allow popups if prompted - -**Phantom Mobile:** -- [ ] Ensure Phantom app installed -- [ ] Click wallet button -- [ ] Should auto-open Phantom app -- [ ] Approve connection in app -- [ ] Return to browser -- [ ] Verify connection in HUD - -**Trust Wallet Mobile:** -- [ ] Ensure Trust Wallet app installed -- [ ] Click wallet button -- [ ] Select WalletConnect -- [ ] Should open Trust Wallet app -- [ ] Approve connection -- [ ] Return to Safari - -**MetaMask Mobile:** -- [ ] Ensure MetaMask app installed -- [ ] Click wallet button -- [ ] Select WalletConnect -- [ ] Should open MetaMask app -- [ ] Approve connection -- [ ] Return to Safari - -**Mobile UX:** -- [ ] Touch controls responsive -- [ ] Buttons at least 44x44 pixels -- [ ] Safe area (notch) handled correctly -- [ ] Landscape mode works -- [ ] Viewport fills screen properly -- [ ] No horizontal scrolling -- [ ] HUD readable at small size - -**Web5 on Mobile:** -- [ ] DID created on first load -- [ ] DID shown in HUD (truncated) -- [ ] Game state saves -- [ ] Refresh preserves state - -### Mobile Testing (Android Chrome) - -Same checklist as iOS Safari, but: -- Use Chrome browser -- Test on Android device -- Verify deep links work differently -- Check performance on lower-end devices - -### Performance Testing - -**Desktop:** -- [ ] Game loads in < 5 seconds -- [ ] Smooth 60 FPS gameplay -- [ ] No lag when collecting items -- [ ] Animations smooth - -**Mobile:** -- [ ] Game loads in < 10 seconds -- [ ] Smooth 30+ FPS gameplay -- [ ] Touch controls responsive -- [ ] No excessive lag -- [ ] Battery drain acceptable - -## πŸ› Common Issues & Solutions - -### "WalletConnect is not defined" -**Solution:** Add VITE_WALLETCONNECT_PROJECT_ID to .env and rebuild - -### "Web5 initialization failed" -**Solution:** -- Check browser supports IndexedDB (modern browser required) -- Clear browser data and try again -- Game still works with localStorage fallback - -### Mobile wallet doesn't open -**Solution:** -- Ensure wallet app installed from App Store/Play Store -- Allow browser to open external apps -- Try opening wallet app first, then browse from within app - -### Game won't load on mobile -**Solution:** -- Check console for errors -- Try different browser (Chrome vs Safari) -- Ensure device has WebGL support -- Clear browser cache - -### Poor performance on mobile -**Solution:** -- Close background apps -- Check device has 2GB+ RAM -- Game automatically uses lower quality on weak devices -- Try landscape mode - -## πŸ“Š What to Test For - -### Wallet Connection -- βœ… Multiple wallets work -- βœ… Connection persists across page refreshes -- βœ… Disconnect works -- βœ… Reconnect works -- βœ… Wallet address displayed correctly - -### Web5 DID -- βœ… DID created automatically -- βœ… DID persists across sessions -- βœ… Game state saves to DWN -- βœ… Game state loads from DWN -- βœ… localStorage fallback works - -### Mobile Experience -- βœ… Touch controls work -- βœ… Deep linking works -- βœ… UI scales properly -- βœ… Safe areas respected -- βœ… Performance acceptable -- βœ… No horizontal scroll - -### Game Functionality -- βœ… Can move player -- βœ… Can collect items -- βœ… Score updates -- βœ… Level progression works -- βœ… NFT minting works -- βœ… Transactions succeed - -## 🚒 Deployment Steps - -### Build for Production - -```bash -cd tradehax-frontend -npm run build -``` - -This creates `dist/` folder with optimized files. 
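-
-### Preview the Build Locally (Optional)
-Before copying `dist/` anywhere, it helps to smoke-test the production bundle. A minimal sketch, assuming the frontend uses Vite's standard scripts (the dev server above runs on Vite's default port 5173); if no `preview` script exists in `package.json`, the `npx` form works the same way.
-
-```bash
-cd tradehax-frontend
-
-# Serve the built dist/ folder on Vite's preview server (default port 4173)
-npm run preview
-# or, without a preview script:
-npx vite preview --port 4173
-
-# Re-run the wallet and Web5 checks above against http://localhost:4173
-```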
- -### Deploy to GitHub Pages - -```bash -# From repository root -cp -r tradehax-frontend/dist/* games/hyperborea/ - -git add games/hyperborea -git commit -m "Deploy Hyperborea alpha with WalletConnect & Web5" -git push origin main -``` - -### Deploy to Vercel - -```bash -cd tradehax-frontend -vercel --prod -``` - -Or use Vercel GitHub integration. - -### Deploy to Netlify - -```bash -cd tradehax-frontend -netlify deploy --prod --dir=dist -``` - -### Environment Variables in Production - -**GitHub Pages:** -- No server-side env vars -- All config must be in built files -- WalletConnect ID embedded in build - -**Vercel/Netlify:** -- Add env vars in dashboard: - - `VITE_WALLETCONNECT_PROJECT_ID` - - `VITE_BACKEND_URL` - - `VITE_SOLANA_NETWORK` - -## πŸ“ˆ Monitoring After Deployment - -### Check These Metrics - -1. **Wallet Connection Success Rate** - - Track which wallets work - - Identify connection failures - -2. **Web5 DID Creation Rate** - - See how many users get DIDs - - Check initialization errors - -3. **Mobile Usage** - - Track mobile vs desktop users - - Monitor mobile performance - -4. **Game Engagement** - - Items collected - - Average session time - - Return rate - -### Tools to Use - -- Google Analytics (already in index.html) -- Browser console logs -- Vercel/Netlify analytics -- Custom error tracking - -## βœ… Definition of Done - -### Before Marking Complete - -- [ ] WalletConnect Project ID configured -- [ ] Tested on 2+ desktop browsers -- [ ] Tested on iOS Safari -- [ ] Tested on Android Chrome -- [ ] All 4 wallets connect successfully -- [ ] Web5 DID creates successfully -- [ ] Game state persists correctly -- [ ] Mobile performance acceptable -- [ ] No critical bugs found -- [ ] Documentation reviewed -- [ ] Deployed to staging environment -- [ ] Smoke testing passed - -### Alpha Release Criteria - -- [ ] All "Before Marking Complete" items βœ… -- [ ] Security review passed -- [ ] Performance benchmarks met -- [ ] User documentation finalized -- [ ] Support channels ready -- [ ] Rollback plan prepared -- [ ] Monitoring configured - -## 🎯 Success Metrics - -**Alpha is successful if:** -- 80%+ wallet connections succeed -- 70%+ users get Web5 DID -- 60%+ mobile users can play smoothly -- 90%+ game state persists correctly -- < 5% critical bug rate - -## πŸ“ž Support & Help - -**Found a bug?** -1. Check IMPLEMENTATION_SUMMARY.md for known issues -2. Check WALLETCONNECT_WEB5_GUIDE.md for solutions -3. Create GitHub issue with: - - Browser/device info - - Steps to reproduce - - Console errors - - Screenshots - -**Need help?** -- Discord: [Community link] -- Email: support@tradehax.net -- GitHub Discussions - ---- - -**Last Updated:** January 2, 2026 -**Version:** 1.0.0-alpha -**Status:** Ready for Testing diff --git a/TRADEBOT_TRAINING_PIPELINE.md b/TRADEBOT_TRAINING_PIPELINE.md deleted file mode 100644 index b8f1df57..00000000 --- a/TRADEBOT_TRAINING_PIPELINE.md +++ /dev/null @@ -1,126 +0,0 @@ -# Tradebot Training Pipeline - -This pipeline prepares high-signal training data for TradeHax tradebots with repeatable quality controls. 
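-
-For orientation, a single chat-format record looks roughly like the sketch below (pretty-printed here; in the `.jsonl` files each record sits on one line). The exact field names and system prompt are owned by the prepare script, so treat this as illustrative rather than the canonical schema.
-
-```json
-{
-  "messages": [
-    { "role": "system", "content": "You are a TradeHax tradebot assistant. Always include invalidation levels and risk controls." },
-    { "role": "user", "content": "SOL is ranging on the 4h with falling ATR. How should a swing bot size entries?" },
-    { "role": "assistant", "content": "In a contracting range, reduce position size, stagger limit entries near range lows, and set invalidation just below the range low..." }
-  ]
-}
-```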
- -## What it builds - -Running the build generates: - -- `data/tradebot/train.chat.jsonl` – main chat-format training set -- `data/tradebot/validation.chat.jsonl` – validation set for early stopping and regression checks -- `data/tradebot/train.raw.jsonl` – normalized raw training records -- `data/tradebot/validation.raw.jsonl` – normalized raw validation records -- `data/tradebot/manifest.json` – stats, quality metrics, and config used -- `data/tradebot/eval-suite.jsonl` – paper-trading evaluation prompts + rubric metadata (when generated) -- `data/tradebot/eval-score.json` – scored benchmark report (after response scoring) - -## Commands - -- `npm run tradebot:prepare-training` – curate + score + dedupe + split -- `npm run tradebot:validate-training` – enforce quality/coverage checks -- `npm run tradebot:build-training` – run both in sequence -- `npm run tradebot:generate-eval-suite` – build benchmark prompts for paper-trading evaluation -- `npm run tradebot:init-eval-responses` – initialize response template for model outputs -- `npm run tradebot:score-eval` – score model outputs from `data/tradebot/eval-responses.jsonl` -- `npm run tradebot:evaluate` – run eval suite generation, template init, and scoring in one command - -## Tunable environment variables - -- `TRADEBOT_MIN_QUALITY_SCORE` (default `0.28`) -- `TRADEBOT_VALIDATION_SHARE` (default `0.2`) -- `TRADEBOT_DATASET_SHUFFLE_SEED` (default `1337`) -- `TRADEBOT_SCENARIO_MULTIPLIER` (default `2`) - -## Optimization strategy included - -1. **Domain filtering** for trading/bot/risk/market relevance. -2. **Quality scoring** using instruction clarity, response depth, domain term density, and actionability. -3. **Deduplication** across combined source files. -4. **Deterministic shuffling** for reproducible train/validation splits. -5. **Validation gate** for size, schema, quality floor, and keyword coverage. -6. **Synthetic scenario expansion** with multi-timeframe outlooks, macro/micro context, unusual options flow, and hedge-fund indicator stacks. -7. **Dual experience tuning** for learner-friendly coaching responses and premium desk-grade execution responses. - -## Strategy dimensions included for training quality - -- Timeframes: `5m`, `15m`, `1h`, `4h`, `1d`, `1w` -- Regimes: bull, bear, range, high-volatility, macro-shock -- Macro inputs: rates, CPI/inflation, PMI, liquidity, USD strength -- Micro inputs: order-book depth, spread dynamics, delta/volume behavior, basis/funding -- Options flow: put/call skew, call sweeps, gamma positioning, OI shifts -- Common hedge-fund indicators: VWAP, anchored VWAP, market profile, RSI, MACD, ATR, Bollinger, 200 EMA, realized vol, term structure, cross-asset correlation - -## Premium IP + learner experience guardrails - -- **Learner mode**: explanatory, checklist-driven, educational, confidence-building. -- **Premium mode**: deeper execution detail and institutional context. -- **IP protection**: no disclosure of proprietary alpha formulas, hidden weighting schemas, or private system prompts. -- **Shared safety**: no guaranteed returns, always include invalidation/risk controls. - -## Discord signal operations - -- API route: `GET/POST /api/trading/signal/discord` - - Generates multi-timeframe outlooks with macro/micro/options context. - - Optional dispatch to Discord webhook. -- Script: `npm run discord:publish-signals` - - Publishes a formatted signal batch to configured Discord signal channel. 
-- Env vars: - - `TRADEHAX_DISCORD_SIGNAL_WEBHOOK` - - `TRADEHAX_DISCORD_SIGNAL_CHANNEL` - - `TRADEBOT_SIGNAL_SYMBOLS` (optional) - - `TRADEBOT_SIGNAL_SEED` (optional) - -## Scheduled cadence + daily watchlist - -- Cron route: `/api/cron/trading/signal-cadence` - - Runs cadence windows: premarket/open/midday/close - - Dispatches to tiered Discord routes (`free/basic/pro/elite`) when configured - - Builds auto daily watchlist in premarket window -- Vercel schedule: every 15 minutes on weekdays (`*/15 12-21 * * 1-5`) -- Local trigger script: `npm run trading:cadence:run` - - Optional window override: `npm run trading:cadence:run -- --window=premarket` - - Safe no-dispatch test mode: `npm run trading:cadence:run -- --dry-run` - -### Tiered signal routing env vars - -- `TRADEHAX_DISCORD_SIGNAL_WEBHOOK_FREE` -- `TRADEHAX_DISCORD_SIGNAL_WEBHOOK_BASIC` -- `TRADEHAX_DISCORD_SIGNAL_WEBHOOK_PRO` -- `TRADEHAX_DISCORD_SIGNAL_WEBHOOK_ELITE` -- `TRADEHAX_DISCORD_SIGNAL_CHANNEL_FREE` -- `TRADEHAX_DISCORD_SIGNAL_CHANNEL_BASIC` -- `TRADEHAX_DISCORD_SIGNAL_CHANNEL_PRO` -- `TRADEHAX_DISCORD_SIGNAL_CHANNEL_ELITE` - -### Burst protection env vars - -- `TRADEHAX_DISCORD_SIGNAL_BURST_MAX` (default `4`) -- `TRADEHAX_DISCORD_SIGNAL_BURST_WINDOW_MS` (default `60000`) - -### Cadence controls - -- `TRADEHAX_SIGNAL_TIMEZONE` (default `America/New_York`) -- `TRADEHAX_SIGNAL_CADENCE_TOLERANCE_MIN` (default `20`) -- `TRADEHAX_SIGNAL_CADENCE_PREMARKET` (default `08:15`) -- `TRADEHAX_SIGNAL_CADENCE_OPEN` (default `09:35`) -- `TRADEHAX_SIGNAL_CADENCE_MIDDAY` (default `12:15`) -- `TRADEHAX_SIGNAL_CADENCE_CLOSE` (default `15:50`) -- `TRADEHAX_SIGNAL_CADENCE_TIERS` (default `free,basic,pro,elite`) -- `TRADEHAX_SIGNAL_DAILY_WATCHLIST_USER` (default `market_daily_watchlist`) - -## Recommended training loop for tradebots - -1. Build datasets: `npm run tradebot:build-training` -2. Fine-tune/evaluate your model with `train.chat.jsonl` + `validation.chat.jsonl` -3. Benchmark against paper-trading tasks (entry/exit/risk prompts) -4. Export runtime interactions and append to source JSONL -5. Rebuild and re-train weekly (or after major strategy updates) - -## Paper-trading evaluation harness - -1. Generate scenarios: `npm run tradebot:generate-eval-suite` -2. Run your model against each prompt in `data/tradebot/eval-suite.jsonl` -3. Save outputs to `data/tradebot/eval-responses.jsonl` with shape: - - `{ "id": "eval_001", "response": "...model answer..." }` -4. Score run quality: `npm run tradebot:score-eval` -5. Inspect `data/tradebot/eval-score.json` for pass/caution status and component-level scores diff --git a/TRADEHAX_AI_PLATFORM_SUMMARY.md b/TRADEHAX_AI_PLATFORM_SUMMARY.md deleted file mode 100644 index bae227db..00000000 --- a/TRADEHAX_AI_PLATFORM_SUMMARY.md +++ /dev/null @@ -1,188 +0,0 @@ -# TradeHax AI Platform - Complete Setup Summary - -## βœ… What's Been Built - -### 1. **Trading Bot System** βœ… -- `lib/trading/tradehax-bot.ts` - Core bot logic with signal processing -- `lib/trading/solana-dex.ts` - DEX integration (Raydium, Orca, Marinade) -- `app/api/trading/bot/create` - Create bots API -- `app/api/trading/signal/process` - Process trading signals -- `app/api/trading/bot/[id]/stats` - Bot statistics -- `app/trading/page.tsx` - Dashboard for bot management - -### 2. 
**Smart Environment System** βœ… -- `lib/ai/smart-environment.ts` - Context-aware environment -- Tracks user profile, market data, active bots, conversation history -- Auto-generates system prompts with market context -- `app/api/environment/init` - Initialize user session -- `app/api/environment/update-context` - Update market/interaction data -- `app/api/environment/context` - Fetch current context -- `components/ai/SmartEnvironmentMonitor.tsx` - Dashboard UI - -### 3. **Image Generation** βœ… -- `lib/ai/image-generator.ts` - Image generation interface -- `app/api/ai/generate-image` - API endpoint -- `components/ai/ImageGeneratorComponent.tsx` - UI component -- Supports: trading charts, NFT art, hero images, general images -- Integrates with Stable Diffusion, DALL-E - -### 4. **AI Hub Platform** βœ… -- `app/ai-hub/page.tsx` - Unified AI hub page -- Smart environment monitor with portfolio tracking -- Image generator component -- AI chat interface -- Text generator -- Dataset & model info -- Getting started guide - -### 5. **Training Datasets** βœ… -- `tradehax-training-expanded.jsonl` - 20 Q&A pairs on trading strategies -- `tradehax-crypto-education.jsonl` - 10 Q&A pairs on blockchain/DeFi -- `ai-training-set.jsonl` - 26 Q&A pairs on TradeHax features -- Total: 56+ training examples ready for fine-tuning - -### 6. **VSCode Extension** βœ… -- `.vscode-extension/package.json` - Extension configuration -- `.vscode-extension/src/extension.ts` - Extension code -- Commands: openDashboard, createBot, viewStats, startBot, stopBot -- Webview dashboard with real-time bot monitoring -- Can be packaged and published to VSCode Marketplace - -### 7. **AI/LLM Integration** βœ… -- Hugging Face Inference API integration -- 4 API endpoints for text generation -- Chat with conversation history -- Summarization -- Streaming text generation (SSE) -- Model support: Mistral-7B, GPT-2, BERT, etc. 
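-
-As a rough sketch of how a client consumes these endpoints, the snippet below posts a prompt to the text-generation route and reads the streamed response chunk by chunk. The request body shape and the raw-text chunk format are assumptions for illustration; check the `/api/ai/generate` route handler for the actual contract.
-
-```typescript
-// Minimal streaming client sketch. Payload and chunk format are assumed,
-// not taken from the route implementation - verify before relying on it.
-export async function streamGeneration(
-  prompt: string,
-  onChunk: (text: string) => void
-): Promise<void> {
-  const res = await fetch('/api/ai/generate', {
-    method: 'POST',
-    headers: { 'Content-Type': 'application/json' },
-    body: JSON.stringify({ prompt }),
-  });
-  if (!res.ok || !res.body) throw new Error(`Generation failed: ${res.status}`);
-
-  const reader = res.body.getReader();
-  const decoder = new TextDecoder();
-  for (;;) {
-    const { done, value } = await reader.read();
-    if (done) break;
-    onChunk(decoder.decode(value, { stream: true }));
-  }
-}
-```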
- -## 🎯 Pages & Routes - -| Route | Purpose | -|-------|---------| -| `/ai` | AI Chat & Text Generation | -| `/ai-hub` | Complete AI Platform Hub | -| `/trading` | Trading Bot Dashboard | -| `/game` | Hyperborea Game | -| `/api/ai/generate` | Text generation API | -| `/api/ai/chat` | Chat API | -| `/api/ai/generate-image` | Image generation API | -| `/api/environment/init` | Initialize environment | -| `/api/environment/context` | Fetch context | -| `/api/trading/bot/create` | Create bot | -| `/api/trading/signal/process` | Process signal | - -## πŸš€ Features - -### Smart Environment -- βœ… User profile & preferences -- βœ… Portfolio tracking -- βœ… Market data integration (SOL, USDC, RAY, BTC, ETH) -- βœ… Active bot monitoring -- βœ… Trading signal tracking -- βœ… Conversation history -- βœ… Context-aware system prompts - -### Trading Bots -- βœ… Multiple strategies: scalping, swing, long-term, arbitrage -- βœ… Risk levels: low, medium, high -- βœ… Position sizing and stop-loss alerts -- βœ… Real-time statistics (win rate, P&L) -- βœ… Active trade monitoring -- βœ… Solana DEX integration - -### AI Capabilities -- βœ… Context-aware chat (knows your portfolio, risk tolerance, bots) -- βœ… Text generation for trading advice, strategies, education -- βœ… Image generation for charts, NFTs, UI -- βœ… Multi-model support (Mistral, GPT-2, Stable Diffusion, DALL-E) -- βœ… Streaming responses -- βœ… Fine-tuning ready with datasets - -### Image Generation -- βœ… Trading charts (1024x768) -- βœ… NFT artwork (1024x1024) -- βœ… Hero images (1920x1080) -- βœ… General images -- βœ… Batch generation support -- βœ… Style-specific prompting - -## πŸ“Š Datasets Ready for Upload - -### Hugging Face Hub -1. **tradehax-behavioral** (Create at: https://huggingface.co/new-dataset) - - 56+ Q&A pairs - - Format: JSONL - - Topics: Trading, DeFi, Crypto, NLP, UI Generation - -2. **tradehax-training-expanded** (20 pairs) - - UI generation, trading strategy, bot setup - - DeFi concepts, risk management - -3. **tradehax-crypto-education** (10 pairs) - - Solana blockchain, smart contracts - - Market sentiment, yield farming - - Spot vs futures, slippage - -## πŸ”§ Configuration - -### Environment Variables (`.env.local`) -``` -HF_API_TOKEN=your_token_here -HF_MODEL_ID=mistralai/Mistral-7B-Instruct-v0.1 -LLM_TEMPERATURE=0.7 -LLM_MAX_LENGTH=512 -``` - -### VSCode Extension Installation -```bash -cd .vscode-extension -npm install -npm run esbuild -# Creates .vsix package for distribution -``` - -## πŸ“ Next Steps - -1. **Unblock GitHub Secret** - - Go to: https://github.com/DarkModder33/main/security/secret-scanning/unblock-secret/39pUwTaMT2UmANzmd6usQgWMN2P - - Click "Allow" to unblock HF token - - Retry push - -2. **Deploy to Vercel** - ```bash - git push origin main - # Vercel auto-deploys to tradehax.net & tradehaxai.tech - ``` - -3. **Upload Training Datasets** - - Create dataset: https://huggingface.co/new-dataset - - Name: `tradehax-behavioral` - - Upload files from repo root - -4. **Build VSCode Extension** - ```bash - cd .vscode-extension - npm install && npm run esbuild - # Publish to VSCode Marketplace - ``` - -5. 
**Test All Features** - - Visit `/ai-hub` - Full platform demo - - Visit `/trading` - Bot management dashboard - - Visit `/ai` - Chat interface - - Test image generation - - Monitor smart environment - -## πŸŽ‰ You Now Have - -βœ… Complete AI trading platform -βœ… Smart context-aware environment -βœ… Image generation (charts, NFTs, UI) -βœ… Training datasets (56+ examples) -βœ… VSCode integration -βœ… Solana DEX bot framework -βœ… Multi-model LLM support -βœ… Production-ready code - -All ready for deployment to tradehax.net & tradehaxai.tech! diff --git a/TRANSFORMATION_COMPLETE.md b/TRANSFORMATION_COMPLETE.md deleted file mode 100644 index f097eed5..00000000 --- a/TRANSFORMATION_COMPLETE.md +++ /dev/null @@ -1,298 +0,0 @@ -# TradeHax Digital Empire Transformation - Complete Summary - -**Date**: March 10, 2026 -**Vision**: Transform tradehax.net from 3 disconnected tools into a unified, gravity-well platform with network effects -**Status**: βœ… Foundation Complete | Executing Week 1-4 Roadmap - ---- - -## 🎯 What We Built - -### 1. **Strategic Foundation Document** - -- `DIGITAL_EMPIRE_STRATEGY.md` - Master 52-week blueprint -- `90_DAY_EXECUTION_PLAN.md` - Hyper-focused 12-week roadmap -- Metrics targets: 500β†’50K MAU, 2%β†’8% conversion, K coefficient 0.3β†’1.5 - -### 2. **UX Transformation (3 Quick Wins)** - -#### βœ… Win #1: Hero Clarity - -**What**: Redesigned landing page hero section -**Before**: "One platform. Three precision environments." -**After**: "Multiply Your Edge. Deploy AI agents across trading, music, and services." -**Impact**: +30-40% CTA click-through expected - -**Key messaging**: - -``` -πŸ€– For traders: AI signals that work. Paper trade risk-free. -🎸 For creators: AI guitar coach & promotion engine. -⚑ For builders: AI agents as a service. Import template. -``` - -#### βœ… Win #2: Gamified Onboarding System - -**What**: 4-step achievement path with progressive unlocks -**Component**: `GamifiedOnboarding.tsx` (252 lines, localStorage persisted) - -**Achievement Path**: - -``` -1. Discover β†’ Explore one platform β†’ +250 XP + Badge -2. Analyze β†’ Run one AI scan β†’ +500 XP + Badge -3. Create β†’ Generate one asset β†’ +750 XP + Badge -4. 
Connect β†’ Link wallet β†’ +1000 XP + $100 credits ✨ -``` - -**Why this works**: - -- Gamification increases completion 5x -- Progressive disclosure (not overwhelming) -- Clear reward hierarchy -- Unlockable Discord role incentive - -#### βœ… Win #3: Unified Value Prop - -**What**: Every entry point now explains "for whom" -**Before**: "Trade Intelligence" (generic) -**After**: "For traders: Real-time signals, backtesting, automated execution" (specific) -**Impact**: 5x faster comprehension for first-time users - ---- - -## πŸ“Š Technical Execution - -### Commits Created - -| Commit | Message | Impact | -|--------|---------|--------| -| `8045a01` | PM2 ecosystem config for env vars | Fixes production startup | -| `210db31` | UX transformation (hero+onboarding) | User experience | -| `ddea3aa` | 90-day execution plan | Strategic roadmap | -| `ef26968` | Eslint fix (apostrophe escape) | Build validation | - -### Code Added - -- `components/onboarding/GamifiedOnboarding.tsx` - 252 lines, client component -- `app/page.tsx` - Updated hero section (40 line diff) -- `app/layout.tsx` - Integrated onboarding modal -- `DIGITAL_EMPIRE_STRATEGY.md` - 258 lines strategic doc -- `90_DAY_EXECUTION_PLAN.md` - 231 lines tactical doc - -### Quality Assurance - -- βœ… Type-check: 0 errors -- βœ… Lint: All warnings resolved -- βœ… Test coverage ready (Jest configurations present) -- βœ… All code follows existing patterns - ---- - -## πŸš€ 12-Week Roadmap (Phased Execution) - -### Phase 1: Foundation (Weeks 1-4) βœ… **IN PROGRESS** - -**Goal**: Make TradeHax unmistakably clear + addictive - -| Week | Feature | ETA | Status | -|------|---------|-----|--------| -| 1 | Hero clarity + gamification | βœ… | **DONE** | -| 2 | Leaderboard MVP | Next 3 days | *Planned* | -| 3 | Marketplace skeleton | Next 5 days | *Planned* | -| 4 | Dashboard hub unification | Next 7 days | *Planned* | - -**Expected outcomes**: +40% engagement, 5x onboarding completion - -### Phase 2: Network Effects (Weeks 5-8) - -**Goal**: Build compounding growth through referrals + community - -- Week 5: Discord bot native integration (10K+ guild potential) -- Week 6: Referral flywheel ($100 credit mechanic) -- Week 7: Learning center framework (freeβ†’premium upsell) -- Week 8: Community Discord (500+ active members) - -**Expected outcomes**: K coefficient 0.3β†’0.8, new user velocity +300% - -### Phase 3: Monetization (Weeks 9-12) - -**Goal**: Transparent pricing + conversion optimization - -- Week 9: Pricing page redesign + tiered clear benefits -- Week 10-11: A/B testing (landing pages, pricing, flows) -- Week 12: Metrics review + Q2 roadmap - -**Expected outcomes**: 8% conversion rate, $50K/month baseline revenue - ---- - -## πŸ’° 7 Revenue Streams Enabled - -1. **Premium Subscriptions** ($9/mo Pro β†’ $99/mo Council) -2. **Marketplace Revenue Share** (30% commission on creator sales) -3. **Learning Center Certificates** ($49 mastery course = 10% upsell) -4. **Affiliate Links** (existing, now in gamified context) -5. **AdSense** (existing, optimized per tier) -6. **White-label/Consulting** (Council tier) -7. 
**API Access** (Builder+ tier new revenue) - ---- - -## πŸŽ“ Why This Is a "Digital Empire" Strategy - -### Network Effects - -- **Leaderboards**: Users check daily β†’ 40% DAU increase -- **Marketplace**: Creators earn β†’ 1,000+ AI agents β†’ users buy β†’ flywheel -- **Referral**: Each user invites 3 β†’ K=1.5 β†’ exponential growth -- **Discord**: Users in 100K+ guilds β†’ 0 marginal acquisition cost - -### Defensibility - -- Users invest time in leaderboard rank β†’ switching cost -- Creators build on platform β†’ asset lock-in -- Community β†’ network moat -- Reputation β†’ trust increase - -### Scalability - -- All code is cloud-native (Next.js standalone) -- Discord bot = infinite distribution (no server cost) -- Marketplace = asset library scales with creators -- Learning center = evergreen content (amortizes over time) - -### Monetization - -- Free tier β†’ *discovery* -- Pro tier β†’ *engagement* -- Builder β†’ *monetization* -- Council β†’ *enterprise* -- Marketplace β†’ *compounding* - -**Total TAM Growth**: $0 β†’ $500K/year (by month 12) β†’ $5M+ (by year 2) - ---- - -## πŸ“ˆ Success Metrics (Tracking) - -### User Growth - -- [ ] MAU: 500 β†’ 5K (month 2) β†’ 50K (month 12) βœ“ TRACKABLE -- [ ] Viral Coefficient: 0.3 β†’ 0.8 (month 3) β†’ 1.5+ (month 6) βœ“ TRACKABLE -- [ ] DAU/MAU: 10% β†’ 25% (month 3) β†’ 40% (month 12) βœ“ TRACKABLE - -### Engagement - -- [ ] Leaderboard daily checks: 30% of users (month 3) -- [ ] Avg. session time: 3min β†’ 12min (month 4) -- [ ] Cross-service usage: 5% β†’ 35% (month 6) - -### Monetization - -- [ ] Premium conversion: 2% β†’ 5% (month 2) β†’ 8% (month 4) -- [ ] ARPU: $0.50 β†’ $2 (month 3) β†’ $8 (month 6) -- [ ] CAC: $5 (paid) β†’ $0.50 (referral) (month 3) -- [ ] LTV: $50 β†’ $500 (month 9) - -### Quality - -- [ ] NPS: 40 β†’ 55 (month 6) β†’ 65+ (month 12) -- [ ] Churn: 15% β†’ 8% (month 6) β†’ <5% (month 12) -- [ ] Marketplace GMV: $0 β†’ $50K/month (month 12) - ---- - -## 🎬 Next Action Items (This Week) - -### High Priority - -1. **Leaderboard Component** (3 days) - - Real P&L syncing - - Music chart integration - - Premium "featured" badge - -2. **Marketplace Breadboard** (2 days) - - Agent card component - - Search/filter UI - - Preview modal - -3. 
**Discord Bot Infrastructure** (2 days) - - Bot app registration - - Scan command handler - - Generate command handler - -### Medium Priority - -- Dashboard hub unification -- Learning center SQL schema -- Referral link generation - -### Nice-to-Have - -- Mobile optimization -- Social sharing buttons -- In-app tutorials - ---- - -## πŸ“š Reference Documents - -**Strategy & Planning**: - -- [DIGITAL_EMPIRE_STRATEGY.md](./DIGITAL_EMPIRE_STRATEGY.md) - 12-week blueprint -- [90_DAY_EXECUTION_PLAN.md](./90_DAY_EXECUTION_PLAN.md) - Tactical roadmap - -**Code Components**: - -- `components/onboarding/GamifiedOnboarding.tsx` - Achievement system -- `app/page.tsx` - Updated hero -- `app/layout.tsx` - Integration point - -**Deployment**: - -- Last run: `ef26968` (eslint-clean) -- Status: Queued for testing -- Strategy: Prebuilt artifacts via tar-over-SSH - ---- - -## πŸ† The Win - -**Before this session:** - -- 3 disconnected tools -- Unclear value prop -- No network effects -- Low monthly engagement -- Flat growth trajectory - -**After this session:** - -- Unified AI agent platform narrative -- Crystal-clear "for whom" messaging -- Network effects engine designed -- Gamified engagement system -- Exponential growth trajectory (K=1.5+) - -**Result**: TradeHax transforms from feature-heavy product into **network effect platform**. - ---- - -## πŸ’¬ Quote - -> "If you build it right, users don't just use your platformβ€”they invite their friends. They compete on your leaderboards. They build on your marketplace. That's not a product. That's an empire." - -**Your mission**: Execute this plan weekly. Track metrics religiously. Adjust based on data. - -**The ultimate goal**: By month 12, TradeHax is the obvious choice for anyone wanting to: - -- Get better at trading -- Create music faster -- Build AI services - -Not because of features. Because everyone who matters is already there. - ---- - -**Let's build it.** πŸš€ diff --git a/Untitled-1.yml b/Untitled-1.yml deleted file mode 100644 index eb5705b3..00000000 --- a/Untitled-1.yml +++ /dev/null @@ -1,27 +0,0 @@ -name: Deploy to Namecheap cPanel - -on: - push: - branches: - - main - -jobs: - deploy: - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Deploy via SSH - uses: appleboy/ssh-action@v0.1.10 - with: - host: ${{ secrets.CPANEL_HOST }} - username: ${{ secrets.CPANEL_USER }} - password: ${{ secrets.CPANEL_PASS }} - port: 22 - script: | - cd /home/tradehax/tradehax - git pull origin main - npm install --legacy-peer-deps - npm run build - npm run start:cpanel \ No newline at end of file diff --git a/VERCEL_BRANCH_FIX.md b/VERCEL_BRANCH_FIX.md deleted file mode 100644 index 5e63bf7d..00000000 --- a/VERCEL_BRANCH_FIX.md +++ /dev/null @@ -1,178 +0,0 @@ -# 🚨 URGENT: Fix Vercel Deployment - Wrong Branch Configuration - -## Problem -Your Vercel deployment is **failing with this error**: -``` -npm error enoent Could not read package.json: Error: ENOENT: no such file or directory -Command "npm install" exited with 254 -``` - -## Root Cause -Vercel is configured to deploy from the **`gh-pages` branch** instead of the **`main` branch**. - -- ❌ **`gh-pages` branch**: Contains only static HTML/CSS/JS files (build output for GitHub Pages) -- βœ… **`main` branch**: Contains source code, package.json, and everything Vercel needs to build - -## The Fix (Takes 5 minutes) - -### Step 1: Change Production Branch in Vercel -1. Go to your [Vercel Dashboard](https://vercel.com/dashboard) -2. 
Select your project (`main` or similar name) -3. Click **Settings** in the top menu -4. Click **Git** in the left sidebar -5. Find **"Production Branch"** -6. Change from `gh-pages` to `main` -7. Click **Save** - -### Step 2: Update Next.js Configuration for Vercel - -The repository is currently configured for static export (`output: 'export'` in `next.config.ts`), which is suitable for GitHub Pages but **not optimal for Vercel**. You have two options: - -#### Option A: Keep Static Export (Simpler, works for both) -This works but doesn't leverage Vercel's full Next.js capabilities (no API routes, no SSR). - -**No changes needed** - Vercel can deploy static exports, but update the output directory in vercel.json: - -1. Edit `vercel.json` locally or in Vercel Dashboard β†’ Settings β†’ General -2. Change `"outputDirectory": "out"` (should already be set) -3. Ensure `"buildCommand": "npm run build"` is set - -#### Option B: Use Dynamic Next.js for Vercel (Recommended) -This gives you full Next.js features on Vercel while keeping static export for GitHub Pages. - -**Create environment-specific build:** - -1. Create a new npm script in `package.json`: -```json -"scripts": { - "build": "next build", - "build:static": "next build", - "build:vercel": "next build" -} -``` - -2. Create `next.config.vercel.ts` (Vercel-specific config): -```typescript -import type { NextConfig } from "next"; - -const nextConfig: NextConfig = { - // No output: 'export' - use full Next.js features - reactStrictMode: false, - images: { - remotePatterns: [ - { protocol: 'https', hostname: 'tradehaxai.tech' }, - { protocol: 'https', hostname: '*.vercel.app' }, - ], - }, - experimental: { - optimizePackageImports: ['lucide-react', '@solana/wallet-adapter-react'], - }, - eslint: { ignoreDuringBuilds: true }, - typescript: { ignoreBuildErrors: true }, -}; - -export default nextConfig; -``` - -3. In Vercel Dashboard β†’ Settings β†’ General: - - Set **Build Command**: `npm run build` - - Set **Output Directory**: `.next` - - Keep **Framework Preset**: Next.js - -**Skip this step for now** - the simpler Option A should work first. - -### Step 3: Trigger a New Deployment -1. Stay in the Vercel Dashboard -2. Click **Deployments** in the top menu -3. Find the latest deployment (it will show as "Failed") -4. Click the **"..."** menu button -5. Click **Redeploy** -6. Check **"Use existing Build Cache"** = OFF (uncheck it) -7. Click **Redeploy** button - -### Step 4: Verify vercel.json Settings - -Ensure your `vercel.json` has these settings: -```json -{ - "buildCommand": "npm run build", - "outputDirectory": "out", - "framework": "nextjs" -} -``` - -These are already correct in your repository. - -### Step 5: Verify Success -1. Wait 2-3 minutes for the build to complete -2. The deployment status should change to **"Ready"** with a green checkmark -3. Visit https://tradehaxai.tech to confirm the site is live - -## Why This Happened - -This repository has **two deployment strategies**: - -1. **GitHub Pages** (via `gh-pages` branch) - - Automated by `.github/workflows/github-pages.yml` - - Builds site and pushes static files to `gh-pages` branch - - Used for GitHub's free hosting at `username.github.io/repo` - -2. **Vercel** (should use `main` branch) - - Automated by `.github/workflows/vercel-deploy.yml` - - Builds from source code on `main` branch - - Used for custom domain https://tradehaxai.tech - -When Vercel was configured, the wrong branch was selected. 
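If you want to confirm the branch mix-up from a terminal before touching the dashboard, a minimal check (assuming you have a clone with the `origin` remote and both branches fetched) is to list what each branch actually contains:

```bash
# gh-pages should show only built artifacts; main should show package.json
# and the source directories Vercel needs for `npm install && npm run build`.
git fetch origin main gh-pages
git ls-tree --name-only origin/gh-pages | head
git ls-tree --name-only origin/main | head
```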
This caused Vercel to try building from pre-built static files instead of source code. - -## Visual Guide - -### What Vercel Currently Sees (Wrong): -``` -gh-pages branch -β”œβ”€β”€ index.html ← Static HTML -β”œβ”€β”€ _next/ ← Built JavaScript -β”œβ”€β”€ assets/ ← Images, CSS -└── 404.html -❌ NO package.json -❌ NO source code -``` - -### What Vercel Should See (Correct): -``` -main branch -β”œβ”€β”€ package.json βœ… Dependencies -β”œβ”€β”€ app/ βœ… Source code -β”œβ”€β”€ components/ βœ… React components -β”œβ”€β”€ next.config.ts βœ… Next.js config -└── vercel.json βœ… Vercel settings -``` - -## Additional Notes - -### Both Deployments Can Coexist -- **GitHub Pages**: `gh-pages` branch β†’ GitHub hosting (backup/alternate) -- **Vercel**: `main` branch β†’ Primary deployment with custom domain - -### If You Only Want Vercel -You can disable GitHub Pages deployments: -1. Go to repository Settings β†’ Pages -2. Set Source to "None" -3. Or disable `.github/workflows/github-pages.yml` - -### If Build Still Fails After Fix -Check these common issues: -1. Missing Vercel environment variables (see `VERCEL_DEPLOYMENT_TROUBLESHOOTING.md`) -2. Missing GitHub secrets (`VERCEL_TOKEN`, `VERCEL_ORG_ID`, `VERCEL_PROJECT_ID`) -3. Build errors in code (test locally with `npm run build`) - -## Need More Help? - -- **Full troubleshooting guide**: See `VERCEL_DEPLOYMENT_TROUBLESHOOTING.md` -- **Domain setup**: See `VERCEL_DOMAIN_SETUP.md` -- **Quick start**: See `DEPLOYMENT_QUICKSTART.md` - ---- - -**Priority**: πŸ”΄ **CRITICAL - Site is down** -**Time to Fix**: ⏱️ **2 minutes** -**Difficulty**: 🟒 **Easy - No code changes needed** diff --git a/VERCEL_DEPLOYMENT_TROUBLESHOOTING.md b/VERCEL_DEPLOYMENT_TROUBLESHOOTING.md deleted file mode 100644 index 5c0b4484..00000000 --- a/VERCEL_DEPLOYMENT_TROUBLESHOOTING.md +++ /dev/null @@ -1,544 +0,0 @@ -# Vercel Deployment Troubleshooting Guide - -## Overview -This guide helps you diagnose and resolve common issues when deploying to https://tradehaxai.tech via Vercel. - ---- - -## Quick Diagnostic Checklist - -If your site is not live after pushing to main, work through this checklist: - -### 1. GitHub Actions Status βœ… -- [ ] Navigate to: https://github.com/DarkModder33/main/actions -- [ ] Check that the "Deploy to Vercel" workflow ran successfully -- [ ] Look for green checkmarks, not red X marks -- [ ] Review workflow logs if deployment failed - -### 2. Vercel Dashboard Status βœ… -- [ ] Log into [Vercel Dashboard](https://vercel.com/dashboard) -- [ ] Find your project and check latest deployment status -- [ ] Verify deployment shows as "Ready" not "Failed" or "Building" -- [ ] Check deployment logs for any errors - -### 3. DNS Configuration βœ… -- [ ] Verify DNS records in your domain registrar -- [ ] Check A record: `@` β†’ `76.76.21.21` -- [ ] Check CNAME record: `www` β†’ `cname.vercel-dns.com.` -- [ ] Confirm verification TXT record: `_vercel` β†’ `vc-domain-verify=tradehaxai.tech,9b1517380c738599577c` - -### 4. Domain Status in Vercel βœ… -- [ ] Go to Vercel Dashboard β†’ Settings β†’ Domains -- [ ] Verify tradehaxai.tech shows "Valid Configuration" -- [ ] Check SSL certificate status is "Active" -- [ ] Ensure domain is assigned to correct project - -### 5. 
DNS Propagation βœ… -- [ ] Visit [DNS Checker](https://dnschecker.org) -- [ ] Enter `tradehaxai.tech` and check A record globally -- [ ] Verify most locations show correct IP: 76.76.21.21 -- [ ] If not propagated, wait and check again in 1 hour - ---- - -## Common Issues & Solutions - -### Issue 1: GitHub Actions Workflow Fails - -#### Symptom -- Workflow shows red X mark -- Deployment never reaches Vercel -- Error in workflow logs - -#### Diagnosis -Check the workflow logs in GitHub Actions: -1. Go to: https://github.com/DarkModder33/main/actions -2. Click on the failed workflow run -3. Expand the failed step to see error message - -#### Common Causes & Fixes - -**Missing VERCEL_TOKEN Secret** -``` -Error: missing required environment variable VERCEL_TOKEN -``` -**Solution**: -1. Go to Vercel Dashboard β†’ Settings β†’ Tokens -2. Create a new token with name "GitHub Actions" -3. Copy the token value -4. Go to GitHub repo β†’ Settings β†’ Secrets and variables β†’ Actions -5. Click "New repository secret" -6. Name: `VERCEL_TOKEN`, Value: [paste token] -7. Click "Add secret" - -**Missing VERCEL_ORG_ID or VERCEL_PROJECT_ID** -``` -Error: missing required VERCEL_ORG_ID or VERCEL_PROJECT_ID -``` -**Solution**: -1. In your local project directory, run: `vercel link` -2. This creates `.vercel/project.json` with your IDs -3. Extract values from that file: - - `projectId` β†’ Set as GitHub secret `VERCEL_PROJECT_ID` - - `orgId` β†’ Set as GitHub secret `VERCEL_ORG_ID` - -**Build Fails** -``` -Error: Command "npm run build" failed -``` -**Solution**: -1. Test build locally: `npm run build` -2. Fix any build errors in your code -3. Commit and push fixes -4. Verify build passes locally before pushing - ---- - -### Issue 2: Vercel Build Fails - -#### Symptom -- GitHub Actions succeeds -- Vercel shows "Build Failed" in dashboard -- Deployment logs show errors - -#### Diagnosis -Check Vercel deployment logs: -1. Go to Vercel Dashboard β†’ Deployments -2. Click on the failed deployment -3. Review build logs for specific errors - -#### Common Causes & Fixes - -**Wrong Branch Configuration - Deploying from gh-pages** -``` -Error: ENOENT: no such file or directory, open '/vercel/path0/package.json' -npm error enoent Could not read package.json -Command "npm install" exited with 254 -``` -**Problem**: Vercel is trying to deploy from the `gh-pages` branch, which only contains static build output (HTML/CSS/JS files) without source code or package.json. - -**Solution**: -1. Go to Vercel Dashboard β†’ Settings β†’ Git -2. Under "Production Branch", change from `gh-pages` to `main` -3. Click "Save" -4. Go to Deployments tab -5. Click "Redeploy" on the latest deployment -6. The build should now succeed - -**Why this happens**: The `gh-pages` branch is used for GitHub Pages deployment (static hosting) and contains only the build output from the `out/` directory. Vercel needs the source code from the `main` branch to run `npm install` and `npm run build`. - -**Missing Dependencies** -``` -Error: Cannot find module 'some-package' -``` -**Solution**: -1. Ensure package is in `package.json` dependencies -2. Run `npm install` locally to verify -3. Commit and push updated `package.json` and `package-lock.json` - -**Environment Variable Missing** -``` -Error: NEXT_PUBLIC_SOME_VAR is not defined -``` -**Solution**: -1. Go to Vercel Dashboard β†’ Settings β†’ Environment Variables -2. Add the missing variable -3. Set appropriate value -4. Select "Production" environment -5. 
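For the `VERCEL_ORG_ID` / `VERCEL_PROJECT_ID` step above, a small sketch for pulling both IDs out of the file that `vercel link` creates (assuming `jq` is installed; opening the file in an editor works just as well):

```bash
# .vercel/project.json is generated by `vercel link`; copy these two values
# into the GitHub repository secrets VERCEL_PROJECT_ID and VERCEL_ORG_ID.
jq -r '.projectId, .orgId' .vercel/project.json
```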
Redeploy from Deployments tab - -**Out of Memory** -``` -Error: JavaScript heap out of memory -``` -**Solution**: -1. Check for memory leaks in code -2. Optimize large data processing -3. Consider upgrading Vercel plan for more memory -4. Add `NODE_OPTIONS="--max-old-space-size=4096"` to build command - -**TypeScript Errors** -``` -Error: Type error: Property 'x' does not exist on type 'Y' -``` -**Solution**: -1. Fix TypeScript errors locally: `npm run lint` -2. Run `npm run build` to catch all errors -3. Commit fixes and push - ---- - -### Issue 3: Domain Not Resolving - -#### Symptom -- Build succeeds -- Vercel deployment is Ready -- Site doesn't load at tradehaxai.tech -- Shows "This site can't be reached" error - -#### Diagnosis -```bash -# Check DNS resolution -nslookup tradehaxai.tech - -# Check from multiple locations -# Visit: https://dnschecker.org -``` - -#### Common Causes & Fixes - -**DNS Records Not Added** -**Solution**: Follow [VERCEL_DOMAIN_SETUP.md](./VERCEL_DOMAIN_SETUP.md) to add required records - -**DNS Not Propagated Yet** -**Solution**: -- Wait 24-48 hours for full propagation -- Check progress at https://dnschecker.org -- Use `nslookup tradehaxai.tech` to test locally - -**Wrong DNS Values** -**Solution**: -- Verify A record value is exactly: `76.76.21.21` -- Verify CNAME value is exactly: `cname.vercel-dns.com.` -- Fix any typos or incorrect values - -**Conflicting DNS Records** -**Solution**: -- Remove old GitHub Pages A records -- Remove old CNAME records -- Keep only Vercel-specific records - -**Wrong Nameservers** -**Solution**: -- Verify nameservers point to your DNS provider (e.g., Namecheap) -- Do NOT use Vercel nameservers unless you transferred domain management -- Check in domain registrar β†’ Nameserver settings - ---- - -### Issue 4: Domain Verification Failed - -#### Symptom -- Vercel shows "Domain verification pending" -- Can't add custom domain -- Error: "Domain verification failed" - -#### Diagnosis -Check TXT record: -```bash -# Check if TXT record exists -nslookup -type=TXT _vercel.tradehaxai.tech - -# Or use online tool -# Visit: https://mxtoolbox.com/TXTLookup.aspx -``` - -#### Solution -1. Add TXT record to DNS: - - **Name**: `_vercel` - - **Value**: `vc-domain-verify=tradehaxai.tech,9b1517380c738599577c` - - **TTL**: 3600 -2. Wait 10 minutes for DNS propagation -3. Click "Retry" in Vercel dashboard -4. If still failing after 1 hour, double-check the value matches exactly - ---- - -### Issue 5: SSL Certificate Not Provisioning - -#### Symptom -- Domain resolves correctly -- Shows "Not Secure" in browser -- SSL certificate pending or failed -- ERR_CERT_COMMON_NAME_INVALID error - -#### Diagnosis -1. Check Vercel Dashboard β†’ Settings β†’ Domains -2. Look for SSL status next to your domain -3. Check browser error details - -#### Common Causes & Fixes - -**DNS Not Fully Propagated** -**Solution**: Wait for DNS to propagate globally before SSL can be issued - -**CAA Records Blocking Let's Encrypt** -**Solution**: -1. Check for CAA records: `nslookup -type=CAA tradehaxai.tech` -2. If present, ensure Let's Encrypt is allowed -3. Add CAA record: `0 issue "letsencrypt.org"` - -**Domain Verification Failed** -**Solution**: Ensure TXT record for domain verification is added (see Issue 4) - -**Too Many Certificate Requests** -**Solution**: -- Let's Encrypt has rate limits -- Wait 1 hour before retrying -- Don't keep removing and re-adding domain - -**Fix by Removing and Re-adding Domain** -**Solution**: -1. Vercel Dashboard β†’ Settings β†’ Domains -2. 
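For the out-of-memory case above, you can reproduce the larger heap locally before changing the Vercel build command (4096 MB matches the suggested `NODE_OPTIONS` value):

```bash
# Run the production build with an enlarged V8 heap; if this succeeds locally,
# apply the same NODE_OPTIONS value in Vercel's build command or env vars.
NODE_OPTIONS="--max-old-space-size=4096" npm run build
```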
Click "β‹―" next to tradehaxai.tech -3. Click "Remove" -4. Wait 5 minutes -5. Add domain again -6. SSL should provision within 15 minutes - ---- - -### Issue 6: Site Loads but Shows 404 for All Pages - -#### Symptom -- Domain resolves -- Site loads but all pages show 404 -- Assets (images, CSS) may not load - -#### Common Causes & Fixes - -**Wrong Build Output Directory** -**Solution**: -1. Verify `vercel.json` does NOT have `"outputDirectory"` set (let Vercel use default `.next`) -2. Ensure Next.js is building correctly: `npm run build` -3. Check `.next` directory exists after build -4. Note: This repo uses conditional static export - GitHub Pages uses `out/`, Vercel uses `.next/` - -**Framework Not Detected** -**Solution**: -1. Ensure `vercel.json` has: `"framework": "nextjs"` -2. Redeploy after updating vercel.json - -**Build Command Wrong** -**Solution**: -1. Verify `vercel.json` has: `"buildCommand": "npm run build"` -2. Ensure package.json has: `"build": "next build"` - ---- - -### Issue 7: Environment Variables Not Working - -#### Symptom -- Build succeeds -- Site loads but features don't work -- API calls fail -- Console shows "undefined" for env vars - -#### Diagnosis -Check browser console: -```javascript -// Open browser DevTools β†’ Console -console.log(process.env.NEXT_PUBLIC_APP_URL) -// Should show: https://tradehaxai.tech -``` - -#### Solution -1. Go to Vercel Dashboard β†’ Settings β†’ Environment Variables -2. Verify all `NEXT_PUBLIC_*` variables are set -3. Ensure they're enabled for "Production" environment -4. After adding/updating, go to Deployments tab -5. Click "β‹―" on latest deployment β†’ "Redeploy" -6. Check "Use existing Build Cache" = OFF -7. Click "Redeploy" - -**Important**: Environment variables starting with `NEXT_PUBLIC_` are embedded at build time. Changing them requires a rebuild. - ---- - -### Issue 8: Assets Loading from Wrong Domain - -#### Symptom -- Site loads but shows broken images -- CSS doesn't apply -- Console shows 404s for assets -- Assets loading from localhost or wrong domain - -#### Solution -1. Update environment variable: - - **Name**: `NEXT_PUBLIC_APP_URL` - - **Value**: `https://tradehaxai.tech` -2. Ensure next.config.ts has correct image domains -3. Redeploy (without cache) -4. Clear browser cache: Ctrl+Shift+R (or Cmd+Shift+R on Mac) - ---- - -### Issue 9: Old Version of Site Showing - -#### Symptom -- Pushed new code -- GitHub Actions succeeded -- Vercel shows new deployment -- Browser still shows old version - -#### Solution - -**Clear Browser Cache** -``` -Chrome/Edge: Ctrl+Shift+R (Cmd+Shift+R on Mac) -Firefox: Ctrl+F5 (Cmd+Shift+R on Mac) -Safari: Cmd+Option+R -``` - -**Clear Vercel Edge Cache** -1. Vercel Dashboard β†’ Deployments -2. Click on latest production deployment -3. Click "Clear Cache" -4. Wait 2-3 minutes -5. Hard refresh browser - -**Check if Correct Deployment is Production** -1. Vercel Dashboard β†’ Deployments -2. Verify latest deployment has "Production" label -3. 
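As a quick way to see the build-time embedding from Issue 7 in action, you can grep a freshly built bundle for the expected value (this assumes `NEXT_PUBLIC_APP_URL` was set to the production URL when the build ran):

```bash
# NEXT_PUBLIC_* values are inlined into the client chunks at build time; if the
# literal value is absent here, it was missing when the build ran and a
# no-cache redeploy is required after setting it.
npm run build
grep -rl "https://tradehaxai.tech" .next/static | head -n 3
```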
If not, click "β‹―" β†’ "Promote to Production" - ---- - -## Vercel Dashboard Health Checks - -### Required Configuration - -#### Settings β†’ General -- [x] **Project Name**: Matches your repository -- [x] **Framework Preset**: Next.js -- [x] **Build Command**: `npm run build` -- [x] **Output Directory**: Leave blank or set to `.next` (Vercel auto-detects) -- [x] **Install Command**: `npm install` -- [x] **Root Directory**: `./` (or leave blank) - -#### Settings β†’ Git -- [x] **Connected Repository**: DarkModder33/main -- [x] **Production Branch**: `main` ⚠️ **IMPORTANT: Must be `main`, NOT `gh-pages`** -- [x] **Automatic Deployments**: Enabled -- [x] **Deploy Hooks**: Optional (can coexist with GitHub Actions) - -**Note**: The `gh-pages` branch is used exclusively for GitHub Pages static hosting and contains only build output. Vercel must deploy from the `main` branch which contains source code. - -#### Settings β†’ Domains -- [x] **tradehaxai.tech**: Valid Configuration βœ… -- [x] **SSL Status**: Active βœ… -- [x] **DNS Status**: Configured βœ… - -#### Settings β†’ Environment Variables -At minimum, set these for Production: -- [x] `NEXT_PUBLIC_APP_URL` = `https://tradehaxai.tech` -- [x] `NEXT_PUBLIC_SOLANA_NETWORK` = `mainnet-beta` -- [x] `NEXT_PUBLIC_SOLANA_RPC` = [Your RPC URL] - -### Deployment Status -- [x] Latest deployment: **Ready** (green) -- [x] Build time: < 5 minutes -- [x] No errors in build logs -- [x] Production deployment active - ---- - -## Testing After Deployment - -Run through this test checklist after any deployment: - -### Basic Functionality -- [ ] Visit https://tradehaxai.tech -- [ ] Site loads without errors -- [ ] HTTPS padlock shows in browser -- [ ] No mixed content warnings -- [ ] All images load correctly -- [ ] Navigation works -- [ ] Console has no critical errors - -### Wallet Integration -- [ ] Wallet connect button visible -- [ ] Can open wallet modal -- [ ] Can connect Solana wallet (if installed) -- [ ] No errors in console related to Web3 - -### Performance -- [ ] First load is under 3 seconds -- [ ] No layout shift (CLS issues) -- [ ] Images are optimized -- [ ] Lighthouse score > 90 - -### Analytics -- [ ] Vercel Analytics tracking visits -- [ ] Google Analytics receiving data (if configured) -- [ ] No tracking errors in console - ---- - -## Emergency Rollback - -If a deployment breaks the site: - -### Option 1: Rollback in Vercel -1. Vercel Dashboard β†’ Deployments -2. Find last working deployment -3. Click "β‹―" β†’ "Promote to Production" -4. Site reverts in ~30 seconds - -### Option 2: Revert Git Commit -```bash -# Identify bad commit -git log --oneline - -# Revert the commit -git revert - -# Push to trigger new deployment -git push origin main -``` - -### Option 3: Emergency Fix -1. Fix the issue in code -2. Commit with clear message: "fix: emergency fix for [issue]" -3. Push to main -4. 
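For Option 2, note that `git revert` needs the hash of the bad commit; a fuller sketch of that flow (`<commit-hash>` is a placeholder for whatever `git log` shows):

```bash
# Identify the offending commit, revert it, and push to trigger a fresh deploy.
git log --oneline -n 5
git revert <commit-hash>
git push origin main
```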
Monitor deployment in real-time - ---- - -## Getting Help - -### Vercel Support -- **Dashboard**: Support button in bottom right -- **Email**: support@vercel.com -- **Docs**: https://vercel.com/docs -- **Status Page**: https://vercel-status.com - -### GitHub Actions Support -- **Docs**: https://docs.github.com/en/actions -- **Community Forum**: https://github.community - -### DNS/Domain Support -- **Namecheap**: https://namecheap.com/support -- **DNS Tools**: https://dnschecker.org, https://mxtoolbox.com - -### Project-Specific Issues -- Check repository README.md -- Review existing documentation -- Open GitHub issue in repository - ---- - -## Prevention Checklist - -Prevent deployment issues before they happen: - -### Before Pushing to Main -- [ ] Test build locally: `npm run build` -- [ ] Run linter: `npm run lint` -- [ ] Test locally: `npm run dev` and verify functionality -- [ ] Check for TypeScript errors -- [ ] Review changes for hardcoded URLs or paths - -### Regular Maintenance -- [ ] Monitor Vercel deployment status weekly -- [ ] Check DNS configuration monthly -- [ ] Review SSL certificate status -- [ ] Update dependencies regularly -- [ ] Test rollback procedures -- [ ] Document any custom configurations - ---- - -**Last Updated**: 2026-01-28 -**For**: tradehaxai.tech Vercel Deployment -**Maintained By**: DarkModder33 diff --git a/VERCEL_DIAGNOSIS.md b/VERCEL_DIAGNOSIS.md deleted file mode 100644 index 753152b9..00000000 --- a/VERCEL_DIAGNOSIS.md +++ /dev/null @@ -1,197 +0,0 @@ -# Vercel + Namecheap Deployment Diagnostics - -## Your Current Setup -- **Repository:** GitHub (DarkModder33/main) -- **Host:** Vercel -- **Domains:** tradehaxai.tech, tradehax.net (via Namecheap DNS) - ---- - -## πŸ” Diagnostic Checklist - -### 1. Is Vercel Connected to GitHub? -**Check in Vercel Dashboard:** -1. Login to [vercel.com](https://vercel.com) -2. Go to your project -3. Settings β†’ Git Integration -4. Should show: "Connected to GitHub repository: DarkModder33/main" - -If NOT connected: -- Go to Project Settings β†’ Git -- Click "Connect Git Repository" -- Select your repo: `DarkModder33/main` - -### 2. Are DNS Records Pointing to Vercel? -**Check in Namecheap DNS:** -1. Login to [namecheap.com](https://namecheap.com) -2. Go to Domain β†’ DNS Records -3. Look for CNAME or A records pointing to Vercel - -**Should look like:** -- `tradehaxai.tech` β†’ CNAME β†’ `cname.vercel-dns.com` (or Vercel's nameservers) -- `tradehax.net` β†’ CNAME β†’ `cname.vercel-dns.com` (or Vercel's nameservers) - -If NOT configured: -- In Vercel dashboard, go to Domains -- Add your domain and follow Vercel's DNS setup wizard -- Update Namecheap DNS records with Vercel's nameservers - -### 3. Check Recent Deployments in Vercel -1. Go to [vercel.com](https://vercel.com) β†’ Your Project -2. Click "Deployments" tab -3. Should show recent deployments with timestamps -4. Click each one to see build logs - -**If build is failing:** -- Click the failed deployment -- Look at "Build Logs" section -- Common issues: - - Missing environment variables - - Build errors in code - - Dependency conflicts - -### 4. Check Build Settings in Vercel -**Settings β†’ Build & Development Settings:** -- Build Command: Should be `npm run build` (or let Vercel detect) -- Output Directory: Should be `.next` or auto-detected -- Install Command: Should be `npm install` (or auto-detected) - -### 5. 
Environment Variables in Vercel -**Settings β†’ Environment Variables:** -Should include (if needed for your app): -- `NEXT_PUBLIC_SITE_URL` -- `NEXT_PUBLIC_SOLANA_NETWORK` -- Any other `NEXT_PUBLIC_*` variables from `.env.example` - ---- - -## πŸš€ How It Should Work - -When you push to `main`: -1. βœ… GitHub notifies Vercel of new commit -2. βœ… Vercel clones your repo -3. βœ… Vercel runs `npm run build` -4. βœ… Build output deployed to Vercel CDN -5. βœ… DNS points domains to Vercel -6. βœ… Changes live in 1-2 minutes - ---- - -## πŸ› Troubleshooting: Changes Not Appearing - -### Step 1: Verify Git Push Succeeded -```bash -git status -# Should show: "Your branch is up to date with 'origin/main'" - -git log --oneline -3 -# Should show your recent commits -``` - -### Step 2: Check Vercel Saw the Push -1. Go to Vercel Dashboard β†’ Your Project -2. Go to "Deployments" tab -3. Should see a new deployment triggered 1-2 minutes after your push -4. If NO new deployment: - - Git integration not connected - - Wrong branch (check you pushed to `main`) - - Webhook issue - -### Step 3: Check if Build Succeeded -1. Click the deployment in Vercel -2. Look for "βœ… Deployment Complete" or "❌ Build Failed" -3. If failed, click "Build Logs" and look for errors - -### Step 4: Clear Cache -```bash -# Hard refresh browser (depends on OS) -Windows/Linux: Ctrl + Shift + R -Mac: Cmd + Shift + R - -# Or use DevTools -F12 β†’ Application tab β†’ Clear Site Data -``` - -### Step 5: Check DNS Resolution -```bash -# Verify domains point to Vercel -nslookup tradehaxai.tech -nslookup tradehax.net - -# Should resolve to Vercel IP (usually 76.76.19.* or similar) -``` - -### Step 6: Check Vercel Domain Status -In Vercel Dashboard: -1. Go to Settings β†’ Domains -2. Each domain should show: "βœ… Valid Configuration" -3. If "❌ Invalid Configuration": - - Follow Vercel's DNS setup instructions - - Update Namecheap DNS records - - Wait 24-48 hours for propagation - ---- - -## πŸ“‹ Step-by-Step: Enable Auto-Deploy - -If Vercel isn't auto-deploying: - -### In Vercel Dashboard: -1. Go to your project -2. Settings β†’ Git Integration -3. Change "Deploy on Push" from off to ON -4. Select branch: `main` -5. Save - -### In Namecheap: -1. Login and go to Domain -2. Click "Advanced DNS" -3. Add/update Vercel nameservers (Vercel will provide them) -4. Remove old nameservers -5. Save - -### Verify It Works: -```bash -# Make a test change -echo "" >> app/page.tsx - -# Commit and push -git add . -git commit -m "test: verify auto-deploy" -git push origin main - -# Watch Vercel Dashboard for deployment (1-2 min) -# Check your domain in browser (5-10 min after deployment) -``` - ---- - -## πŸ”— Important Links - -- **Vercel Dashboard:** https://vercel.com -- **Namecheap Account:** https://namecheap.com -- **Vercel DNS Setup:** https://vercel.com/docs/projects/domains/configure-a-custom-domain -- **GitHub Integration:** https://vercel.com/docs/git/github - ---- - -## ⚠️ Common Issues - -| Issue | Solution | -|-------|----------| -| New deployments not triggering | Check Git Integration connected in Vercel | -| Build fails silently | Check Vercel Deployments tab for errors | -| Domain shows old content | Hard refresh browser + check DNS | -| DNS not resolving | Check Namecheap records point to Vercel | -| 404 after deploy | Check build output directory in Vercel settings | - ---- - -## Next Steps - -1. **Verify Vercel Git Integration** is connected (most likely issue) -2. **Check recent deployments** in Vercel Dashboard -3. 
**Compare timestamps**: Does new deployment show after your push? -4. **If no new deployment**: Re-connect GitHub integration -5. **If build fails**: Check build logs for errors -6. **If DNS issue**: Follow Vercel's domain setup wizard diff --git a/VERCEL_DOMAIN_SETUP.md b/VERCEL_DOMAIN_SETUP.md deleted file mode 100644 index e1623498..00000000 --- a/VERCEL_DOMAIN_SETUP.md +++ /dev/null @@ -1,261 +0,0 @@ -# Vercel Domain Setup for tradehaxai.tech - -## Overview -This document provides step-by-step instructions for configuring the custom domain `tradehaxai.tech` to work with your Vercel deployment. These steps must be completed manually through your domain registrar's DNS settings and Vercel dashboard. - -> **πŸ“‹ Need DNS Inspection?** If you need to verify your current DNS configuration or troubleshoot issues, see [DNS_INSPECTION_REPORT.md](./DNS_INSPECTION_REPORT.md) for a detailed analysis of your DNS records. - -## Prerequisites -- βœ… Domain registered: tradehaxai.tech (via domain registrar like Namecheap, GoDaddy, etc.) -- βœ… Vercel account with project deployed -- βœ… Access to domain DNS settings -- βœ… GitHub repository connected to Vercel - ---- - -## Step 1: Domain Verification (REQUIRED) - -**⚠️ IMPORTANT**: Before your custom domain will work on Vercel, you must add a domain verification TXT record to your DNS settings. - -### Add Vercel Domain Verification TXT Record - -In your domain registrar's DNS management interface (e.g., Namecheap Advanced DNS, GoDaddy DNS Management), add the following TXT record: - -``` -Type: TXT -Name: _vercel -Value: vc-domain-verify=tradehaxai.tech,9b1517380c738599577c -TTL: 3600 (or Auto) -``` - -**⚠️ CRITICAL WARNING - COMMON MISTAKE**: -- ❌ **WRONG**: Do NOT use `cname.vercel-dns.com.` as the value for the `_vercel` TXT record -- βœ… **CORRECT**: Use the verification string format: `vc-domain-verify=tradehaxai.tech,XXXXXXXXXXXXX` -- The `_vercel` record must be a **TXT record** with a **verification string**, NOT a CNAME domain -- The verification string starts with `vc-domain-verify=` followed by your domain and a unique code - -**How to Get Your Verification String**: -1. Go to Vercel Dashboard β†’ Your Project β†’ Settings β†’ Domains -2. Click "Add Domain" and enter `tradehaxai.tech` -3. Vercel will display the exact TXT record value you need -4. Copy the entire value (e.g., `vc-domain-verify=tradehaxai.tech,9b1517380c738599577c`) -5. Add this as the value for your `_vercel` TXT record in your DNS settings - -**Note**: This verification record is specific to your Vercel account and domain. It must be added for Vercel to allow the custom domain to be configured. - -### Why This Step is Manual -- DNS records must be configured outside of source control for security reasons -- Each domain registrar has a different interface for DNS management -- The verification record is tied to your specific Vercel account -- This is a one-time setup per domain - ---- - -## Step 2: Configure DNS Records - -After adding the verification TXT record, configure the following DNS records to point your domain to Vercel: - -### A Record (Apex Domain) -``` -Type: A -Name: @ (or leave blank for apex domain) -Value: 76.76.21.21 -TTL: 3600 -``` - -### CNAME Record (WWW Subdomain) -``` -Type: CNAME -Name: www -Value: cname.vercel-dns.com. -TTL: 3600 -``` - -**Important Notes:** -- Remove any conflicting A or CNAME records (e.g., old GitHub Pages records) -- The CNAME value must end with a dot (.) 
in some DNS providers -- Changes can take 5 minutes to 48 hours to propagate globally - ---- - -## Step 3: Add Domain in Vercel Dashboard - -1. Log into [Vercel Dashboard](https://vercel.com/dashboard) -2. Navigate to your project (it will be named after your GitHub repository) -3. Go to **Settings** β†’ **Domains** -4. Click **Add Domain** -5. Enter: `tradehaxai.tech` -6. Click **Add** -7. Vercel will verify the DNS records and TXT verification -8. Repeat for `www.tradehaxai.tech` if desired - ---- - -## Step 4: Verify Deployment - -### Check DNS Propagation -Use [DNS Checker](https://dnschecker.org) to verify: -- `tradehaxai.tech` β†’ Points to 76.76.21.21 -- `www.tradehaxai.tech` β†’ CNAME to cname.vercel-dns.com - -### Verify SSL Certificate -- Vercel will automatically provision an SSL certificate after DNS propagation -- This usually takes 5-15 minutes after DNS is verified -- Check Vercel Dashboard β†’ Domains for "Valid Configuration" status - -### Test Your Site -Visit these URLs to confirm: -- βœ… https://tradehaxai.tech - Main site loads with HTTPS -- βœ… https://www.tradehaxai.tech - Redirects or loads correctly -- βœ… http://tradehaxai.tech - Redirects to HTTPS - ---- - -## Troubleshooting - -### Domain Verification Failed -**Issue**: Vercel shows "Domain verification failed" error - -**Solutions**: -1. Double-check the TXT record value matches exactly: `vc-domain-verify=tradehaxai.tech,9b1517380c738599577c` -2. Ensure the TXT record name is `_vercel` (underscore included) -3. Wait 5-10 minutes for DNS changes to propagate -4. Use [DNS Checker](https://dnschecker.org) to verify TXT record: enter `_vercel.tradehaxai.tech` -5. If still failing after 1 hour, contact Vercel support - -### DNS Not Resolving -**Issue**: Domain doesn't point to Vercel after adding A/CNAME records - -**Solutions**: -- Wait 24-48 hours for full DNS propagation -- Verify DNS records are correct (A: 76.76.21.21, CNAME: cname.vercel-dns.com) -- Check that nameservers point to your DNS provider (not Vercel nameservers) -- Flush local DNS cache: - - **Windows**: `ipconfig /flushdns` - - **Mac**: `sudo dscacheutil -flushcache` - - **Linux**: `sudo systemd-resolve --flush-caches` - -### SSL Certificate Not Provisioning -**Issue**: Site shows "Not Secure" or SSL error after 24 hours - -**Solutions**: -1. Ensure DNS is fully propagated first (use dnschecker.org) -2. Verify domain shows "Valid Configuration" in Vercel dashboard -3. Wait 30 minutes after DNS propagates -4. Try removing and re-adding the domain in Vercel -5. Check for CAA DNS records that might block Let's Encrypt -6. Contact Vercel support if issue persists after 48 hours - -### Mixed Content Warnings -**Issue**: Some assets load over HTTP instead of HTTPS - -**Solutions**: -- Update `NEXT_PUBLIC_APP_URL` environment variable to `https://tradehaxai.tech` -- Redeploy the application after updating environment variables -- Check that all asset URLs in code use relative paths or HTTPS -- Clear browser cache and hard refresh (Ctrl+Shift+R) - -### Build Failures After Domain Setup -**Issue**: Deployments fail after adding custom domain - -**Solutions**: -1. Check build logs in Vercel Dashboard β†’ Deployments -2. Verify all environment variables are set correctly -3. Test build locally: `npm run build` -4. Ensure all dependencies are in package.json -5. 
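As a compact alternative to the `nslookup` checks above, `dig +short` can spot-check all three records this guide asks for in one pass (`dig` ships with the dnsutils/bind-utils package on most systems):

```bash
dig +short A tradehaxai.tech            # expect 76.76.21.21
dig +short CNAME www.tradehaxai.tech    # expect cname.vercel-dns.com.
dig +short TXT _vercel.tradehaxai.tech  # expect the vc-domain-verify=... string
```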
Check for any hardcoded URLs that need updating - ---- - -## Vercel Dashboard Checks - -If your site is not live after completing the above steps, verify the following in the Vercel Dashboard: - -### Project Settings -1. **Settings β†’ General** - - Production Branch: `main` βœ… - - Framework: `Next.js` βœ… - -2. **Settings β†’ Domains** - - tradehaxai.tech: Valid Configuration βœ… - - SSL Certificate: Active βœ… - -3. **Settings β†’ Environment Variables** - - All required variables set for Production βœ… - - `NEXT_PUBLIC_APP_URL=https://tradehaxai.tech` βœ… - -### Recent Deployments -1. **Deployments Tab** - - Latest deployment: Ready βœ… - - Production deployment: Active βœ… - - No build errors βœ… - -### Analytics & Monitoring -1. **Analytics Tab** - - Traffic showing for tradehaxai.tech βœ… - - No 4xx or 5xx errors βœ… - ---- - -## DNS Propagation Timeline - -Typical propagation times: -- **5-10 minutes**: Initial DNS servers update -- **1-2 hours**: Most DNS servers worldwide updated -- **24-48 hours**: Full global propagation guaranteed - -**Pro Tip**: Use lower TTL values (300-600 seconds) during initial setup for faster updates. Increase to 3600+ after domain is stable. - ---- - -## Security Best Practices - -1. **Keep DNS Records Secure** - - Never share DNS provider credentials - - Enable 2FA on domain registrar account - - Use strong, unique passwords - -2. **Monitor Domain Status** - - Set up alerts for DNS changes - - Regularly verify domain ownership - - Monitor SSL certificate expiration - -3. **Environment Variables** - - Never commit `.env` files to git - - Use Vercel Environment Variables for secrets - - Rotate API keys periodically - ---- - -## Additional Resources - -- **Vercel Domains Documentation**: https://vercel.com/docs/concepts/projects/domains -- **DNS Checker Tool**: https://dnschecker.org -- **SSL Checker**: https://www.sslshopper.com/ssl-checker.html -- **Vercel Support**: https://vercel.com/support -- **Next.js Documentation**: https://nextjs.org/docs - ---- - -## Deployment Checklist - -Complete this checklist to ensure proper domain setup: - -- [ ] Domain verification TXT record added: `_vercel` β†’ `vc-domain-verify=tradehaxai.tech,9b1517380c738599577c` -- [ ] A record added: `@` β†’ `76.76.21.21` -- [ ] CNAME record added: `www` β†’ `cname.vercel-dns.com.` -- [ ] Domain added in Vercel dashboard: tradehaxai.tech -- [ ] DNS propagation verified using dnschecker.org -- [ ] Domain shows "Valid Configuration" in Vercel -- [ ] SSL certificate issued and active -- [ ] Site accessible via https://tradehaxai.tech -- [ ] No mixed content warnings in browser console -- [ ] All environment variables updated for production -- [ ] Latest deployment successful on main branch -- [ ] Analytics tracking working on custom domain - ---- - -**Last Updated**: 2026-01-28 -**Status**: Production Ready -**Deployment Method**: Automated via GitHub Actions + Vercel diff --git a/VERCEL_STATIC_EXPORT_FIX.md b/VERCEL_STATIC_EXPORT_FIX.md deleted file mode 100644 index f19110f6..00000000 --- a/VERCEL_STATIC_EXPORT_FIX.md +++ /dev/null @@ -1,193 +0,0 @@ -# Vercel Static Export Fix - -## Problem - -The Vercel deployment was failing with the error: - -``` -The file "/vercel/path0/out/routes-manifest.json" couldn't be found. -``` - -This happened because: - -1. **next.config.ts** had `output: 'export'` configured for static export (GitHub Pages) -2. Static export creates files in `out/` directory without a `routes-manifest.json` file -3. 
Vercel expects `routes-manifest.json` to exist in the build output for proper Next.js deployments -4. The configuration was forcing both GitHub Pages AND Vercel to use static export, which doesn't work for Vercel - -## Solution - -We implemented a **conditional static export** strategy that: - -- βœ… Uses static export (`out/`) for GitHub Pages deployment -- βœ… Uses standard Next.js build (`.next/`) for Vercel deployment -- βœ… Maintains both deployment methods without conflicts - -### Changes Made - -#### 1. next.config.ts - Conditional Static Export - -```typescript -const nextConfig: NextConfig = { - // Static export for GitHub Pages only (not for Vercel) - // Vercel automatically sets VERCEL=1 environment variable - ...(process.env.VERCEL !== '1' && { output: 'export' }), - // ... rest of config -}; -``` - -**How it works:** -- When building locally or in GitHub Actions: `output: 'export'` is applied β†’ creates `out/` directory -- When building on Vercel: `VERCEL=1` is automatically set β†’ `output: 'export'` is skipped β†’ creates `.next/` directory with `routes-manifest.json` - -#### 2. vercel.json - Remove Output Directory Override - -**Before:** -```json -{ - "outputDirectory": "out" -} -``` - -**After:** -```json -{ - // outputDirectory removed - let Vercel use default .next -} -``` - -**Why:** Vercel's default behavior is to look for Next.js builds in `.next/` directory. By removing the override, we let Vercel do what it does best. - -#### 3. .vercelignore - Allow .next Directory - -**Before:** -``` -.next -out -``` - -**After:** -``` -# .next is needed for Vercel deployment -out -``` - -**Why:** Vercel needs access to the `.next/` build directory to properly deploy the application. - -## How the Dual Deployment Works - -### GitHub Pages Deployment (GitHub Actions) - -``` -1. Code pushed to main branch -2. GitHub Actions workflow runs -3. Runs: npm run build -4. VERCEL env var is NOT set -5. next.config.ts applies output: 'export' -6. Creates: out/ directory with static HTML files -7. Pushes out/ to gh-pages branch -8. GitHub Pages serves from gh-pages branch -``` - -### Vercel Deployment (Vercel Platform) - -``` -1. Code pushed to main branch -2. Vercel detects changes -3. Runs: npm install && npm run build -4. VERCEL=1 is automatically set by Vercel -5. next.config.ts skips output: 'export' -6. Creates: .next/ directory with routes-manifest.json -7. Vercel deploys using standard Next.js hosting -8. Full Next.js features available (SSR, API routes, etc.) -``` - -## Verification - -### Test Locally (Static Export Mode) - -```bash -# Clean previous builds -npm run clean - -# Build without VERCEL env (simulates GitHub Pages) -npm run build - -# Verify out/ directory exists -ls -la out/ - -# Should show: index.html, blog/, game/, etc. -``` - -### Test Locally (Vercel Mode) - -```bash -# Clean previous builds -npm run clean - -# Build with VERCEL=1 (simulates Vercel deployment) -VERCEL=1 npm run build - -# Verify .next/ directory exists with routes-manifest.json -ls -la .next/routes-manifest.json - -# Should show: .next/routes-manifest.json file -``` - -## Benefits of This Approach - -1. βœ… **No code duplication** - single codebase serves both deployments -2. βœ… **Automatic switching** - environment detection handles everything -3. βœ… **Best of both worlds**: - - GitHub Pages: Free static hosting, backup deployment - - Vercel: Full Next.js features, custom domain, edge functions -4. βœ… **Zero configuration needed** - Vercel sets VERCEL=1 automatically -5. 
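If you want the two verification runs above to fail loudly instead of eyeballing directory listings, a tiny sketch that just asserts the expected artifact for each mode (default output locations assumed):

```bash
# After `npm run build` (GitHub Pages mode):
test -f out/index.html && echo "static export output present"
# After `VERCEL=1 npm run build` (Vercel mode):
test -f .next/routes-manifest.json && echo "routes-manifest.json present"
```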
βœ… **Future-proof** - can easily switch strategies if needed - -## Troubleshooting - -### Issue: Vercel still shows "routes-manifest.json not found" - -**Check:** -1. Verify `.next` is NOT in `.vercelignore` -2. Verify `vercel.json` does NOT have `"outputDirectory": "out"` -3. Check Vercel build logs to confirm VERCEL=1 is set -4. Try redeploying without cache: Vercel Dashboard β†’ Deployments β†’ Redeploy β†’ Uncheck "Use existing Build Cache" - -### Issue: GitHub Pages deployment broken - -**Check:** -1. Verify `output: 'export'` is still in next.config.ts (conditionally) -2. Check GitHub Actions workflow is running `npm run build` (without VERCEL=1) -3. Verify `out/` directory is being created in GitHub Actions -4. Check gh-pages branch has the static files - -### Issue: Both deployments fail - -**Check:** -1. Test build locally: `npm run build` (should work) -2. Test Vercel build: `VERCEL=1 npm run build` (should work) -3. Check for TypeScript/ESLint errors -4. Verify all dependencies are in package.json - -## Related Documentation - -- [VERCEL_DEPLOYMENT_TROUBLESHOOTING.md](./VERCEL_DEPLOYMENT_TROUBLESHOOTING.md) - Complete troubleshooting guide -- [DEPLOYMENT_FIX_SUMMARY.md](./DEPLOYMENT_FIX_SUMMARY.md) - Dual deployment overview -- [VERCEL_BRANCH_FIX.md](./VERCEL_BRANCH_FIX.md) - Branch configuration fix - -## Summary - -This fix enables the repository to maintain two independent deployment strategies: - -- **GitHub Pages**: Static export for free hosting and backup -- **Vercel**: Standard Next.js deployment for production with custom domain - -The conditional configuration ensures both deployments work correctly without conflicts, using environment detection to automatically choose the right build strategy. - ---- - -**Issue Fixed**: routes-manifest.json not found in /out/ -**Solution**: Conditional static export based on VERCEL environment variable -**Result**: Both GitHub Pages and Vercel deployments now work correctly -**Date**: 2026-02-08 diff --git a/_safe_backup/NeuralHub.jsx b/_safe_backup/NeuralHub.jsx deleted file mode 100644 index a64c4906..00000000 --- a/_safe_backup/NeuralHub.jsx +++ /dev/null @@ -1,375 +0,0 @@ -"use client"; - -import { useState, useEffect, useRef, useCallback } from "react"; -import { FileUploadComponent } from "./components/FileUploadComponent"; - -/** - * TRADEHAX NEURAL HUB - * One-page AI interface - Clean, uncensored, professional - * Claude/Grok style with direct LLM access - */ - -const COLORS = { - bg: "#0a0e27", - surface: "#1a1f3a", - panel: "#242d4a", - border: "#3a4558", - text: "#e0e6ff", - textDim: "#8b95b8", - accent: "#00d9ff", - success: "#00ff88", - warning: "#ffaa00", - error: "#ff4455", -}; - -const MODELS = [ - { id: "meta-llama/Llama-3.3-70B-Instruct", name: "Llama 70B", type: "Full Power" }, - { id: "Qwen/Qwen2.5-7B-Instruct", name: "Qwen 7B", type: "Fast" }, - { id: "microsoft/Phi-4-mini-instruct", name: "Phi-4", type: "Instant" }, -]; - -export default function NeuralHub() { - // State - const [messages, setMessages] = useState([ - { - id: "init", - role: "assistant", - content: "Welcome to TradeHax Neural Hub. Ask anything - no filters, no restrictions. 
Uncensored AI analysis powered by Llama 70B.", - timestamp: new Date(), - }, - ]); - const [input, setInput] = useState(""); - const [loading, setLoading] = useState(false); - const [selectedModel, setSelectedModel] = useState(MODELS[0].id); - const [temperature, setTemperature] = useState(0.7); - const [maxTokens, setMaxTokens] = useState(1024); - const messagesEnd = useRef(null); - - // Auto-scroll - useEffect(() => { - messagesEnd.current?.scrollIntoView({ behavior: "smooth" }); - }, [messages]); - - // Send message - const handleSend = useCallback(async () => { - const trimmed = input.trim(); - if (!trimmed || loading) return; - - // Add user message - const userMsg = { - id: Date.now().toString(), - role: "user", - content: trimmed, - timestamp: new Date(), - }; - setMessages((prev) => [...prev, userMsg]); - setInput(""); - setLoading(true); - - try { - // Try to use backend API first (which has the token securely) - const hfToken = import.meta.env.VITE_HF_TOKEN || window.ENV?.VITE_HF_TOKEN || ""; - - let response = null; - let aiResponse = null; - - // If token is available, try live HF LLM - if (hfToken) { - try { - response = await fetch("https://api-inference.huggingface.co/models/" + selectedModel, { - method: "POST", - headers: { - Authorization: `Bearer ${hfToken}`, - "Content-Type": "application/json", - }, - body: JSON.stringify({ - inputs: trimmed, - parameters: { - max_new_tokens: maxTokens, - temperature: temperature, - top_p: 0.95, - }, - }), - }); - - if (response?.ok) { - const data = await response.json(); - aiResponse = Array.isArray(data) - ? data[0]?.generated_text || "No response" - : data.generated_text || JSON.stringify(data); - } - } catch (hfError) { - console.warn("HF API failed, falling back to demo:", hfError); - } - } - - // Fallback to demo mode if live LLM failed or no token - if (!aiResponse) { - aiResponse = getDemoResponse(trimmed); - } - - // Clean response - const cleanResponse = (aiResponse || "") - .replace(trimmed, "") - .trim() - .slice(0, 2000) || getDemoResponse(trimmed); - - setMessages((prev) => [ - ...prev, - { - id: Date.now().toString(), - role: "assistant", - content: cleanResponse, - timestamp: new Date(), - }, - ]); - } catch (error) { - console.error("Chat Error:", error); - setMessages((prev) => [ - ...prev, - { - id: Date.now().toString(), - role: "assistant", - content: getDemoResponse(trimmed), - timestamp: new Date(), - }, - ]); - } finally { - setLoading(false); - } - }, [input, loading, selectedModel, temperature, maxTokens]); - - // Demo response generator (no API needed) - const getDemoResponse = (userInput) => { - const q = userInput.toLowerCase(); - - if (q.includes("trading") || q.includes("market")) { - return "For trading analysis, I'd examine market structure, key support/resistance levels, and volume profile. Current crypto markets show strong consolidation patterns with potential breakout zones forming around major moving averages. What specific asset or timeframe are you interested in?"; - } - if (q.includes("ai") || q.includes("neural")) { - return "The neural hub integrates multiple AI models for sophisticated analysis. Llama 70B provides reasoning, Qwen 7B excels at structured data, and Phi-4 delivers low-latency responses. Each model is optimized for different use cases. Which analysis type interests you?"; - } - if (q.includes("price") || q.includes("forecast")) { - return "Price forecasting requires multi-timeframe confluence analysis. 
I use Fibonacci levels, moving average crossovers, and volume profile to identify high-probability entry zones. What timeframe and instrument are you analyzing?"; - } - if (q.includes("risk") || q.includes("kelly")) { - return "Risk management through Kelly Criterion: position sizing = (edge Γ— odds - 1) / odds. Fractional Kelly (25%) is recommended for safety. Combined with Monte Carlo simulations (500 paths), this model accurately predicts ruin rates and optimal growth curves."; - } - if (q.includes("signal") || q.includes("strategy")) { - return "Signal generation integrates: RSI oversold/overbought zones, MACD histogram divergence, Bollinger Band squeeze potential, and Fibonacci confluence. Multi-timeframe confirmation increases accuracy to 65-75% win rate depending on market regime."; - } - if (q.includes("hello") || q.includes("hi")) { - return "Welcome to TradeHax Neural Hub! I'm an uncensored AI assistant with access to multiple models. Ask me about trading strategies, market analysis, AI capabilities, or anything else. No filters, no restrictions."; - } - - // Default intelligent response - return `That's an interesting question about "${userInput.slice(0, 30)}...". The TradeHax platform integrates advanced trading analysis with AI reasoning. I can help with technical analysis, risk management, market signals, and strategic planning. What specific area would you like to explore?`; - }; - - return ( -
- {/* Header */} -
-
-

- TradeHax Neural Hub -

-

- Uncensored AI - Direct LLM access - No filters -

-
-
- - {/* File Storage Section */} -
-
- -
-
- - {/* Main Container */} -
- {/* Messages Area */} -
- {/* Chat Messages */} -
- {messages.map((msg) => ( -
-
- {msg.content} -
- {msg.timestamp.toLocaleTimeString()} -
-
-
- ))} - {loading && ( -
-
- ⏳ Thinking... -
-
- )} -
-
- - {/* Input Area */} -
-
- {MODELS.map((model) => ( - - ))} -
- - {/* Controls */} -
-
- - setTemperature(parseFloat(e.target.value))} - style={{ width: "100%", marginTop: "8px" }} - /> -
- {temperature < 0.5 ? "Precise" : temperature < 1.5 ? "Balanced" : "Creative"} -
-
- -
- - setMaxTokens(parseInt(e.target.value))} - style={{ width: "100%", marginTop: "8px" }} - /> -
- {maxTokens < 512 ? "Short" : maxTokens < 1024 ? "Medium" : "Long"} -
-
-
- - {/* Input Box */} -
-