---
# techpack.yaml — "Memory" tech pack manifest.
# Declares the dependencies, MCP servers, skills, hooks, and configuration
# that make up the persistent-memory system for Claude Code.
schemaVersion: 1
identifier: memory
displayName: "Memory"
description: Persistent memory and knowledge management for Claude Code
author: Bruno Guidolim
minMCSVersion: "2026.4.12"

# ---------------------------------------------------------------------------
# Components
# ---------------------------------------------------------------------------
components:
  # ── Dependencies ────────────────────────────────────────────────────────
  - id: node
    displayName: Node.js
    description: JavaScript runtime (for npx-based MCP servers)
    brew: node

  - id: gh
    displayName: GitHub CLI
    description: GitHub CLI for PR operations
    brew: gh

  - id: jq
    displayName: jq
    description: Lightweight JSON processor
    brew: jq

  - id: ollama
    displayName: Ollama
    description: Local LLM runtime (compatible with all Apple Silicon)
    type: configuration
    shell: "curl -fsSL https://ollama.com/install.sh | sh"
    shellInteractive: true
    doctorChecks:
      - type: commandExists
        name: "Ollama installed"
        section: AI Models
        command: ollama
        args: ["--version"]

  - id: ollama-service
    displayName: Ollama service
    description: Ensure Ollama is running
    type: configuration
    dependencies: [ollama]
    shell: "open /Applications/Ollama.app --args hidden"
    doctorChecks:
      - type: commandExists
        name: "Ollama service running"
        section: AI Models
        command: curl
        args: ["-sf", "http://localhost:11434/api/tags"]
        fixCommand: "open /Applications/Ollama.app --args hidden"

  - id: ollama-nomic-embed
    displayName: nomic-embed-text model
    description: Embedding model for docs-mcp-server
    type: configuration
    dependencies: [ollama-service]
    shell: "ollama pull nomic-embed-text"
    doctorChecks:
      - type: commandExists
        name: "nomic-embed-text model"
        section: AI Models
        command: curl
        args: ["-sf", "http://localhost:11434/api/show", "-d", "{\"name\":\"nomic-embed-text\"}"]
      # Probes the exact endpoint docs-mcp-server calls. /api/show only reads the
      # manifest — it does not load the model. This check forces a full load+embed
      # round-trip, so a runner crash (e.g. Ollama/ggml incompatibility) surfaces
      # as HTTP 500 and fails the check.
      - type: commandExists
        name: "nomic-embed-text embedding endpoint"
        section: AI Models
        command: curl
        args:
          - "-sf"
          - "-X"
          - "POST"
          - "-H"
          - "Content-Type: application/json"
          - "-d"
          - '{"model":"nomic-embed-text","input":"ping"}'
          - "http://localhost:11434/v1/embeddings"

  # ── MCP Servers ─────────────────────────────────────────────────────────
  - id: docs-mcp-server
    description: Semantic search over memories using local Ollama embeddings
    isRequired: true
    dependencies: [node, ollama]
    mcp:
      command: npx
      args:
        - "-y"
        - "@arabold/docs-mcp-server@latest"
        - "--read-only"
        - "--telemetry=false"
      env:
        OPENAI_API_KEY: "ollama"
        OPENAI_API_BASE: "http://localhost:11434/v1"
        DOCS_MCP_EMBEDDING_MODEL: "openai:nomic-embed-text"

  # ── Skills ──────────────────────────────────────────────────────────────
  - id: skill-continuous-learning
    displayName: continuous-learning skill
    description: Extracts learnings and decisions from sessions into memory
    isRequired: true
    skill:
      source: skills/continuous-learning
      destination: continuous-learning

  - id: skill-memory-audit
    displayName: memory-audit skill
    description: Reviews and audits memories to keep the knowledge base lean and valuable
    skill:
      source: skills/memory-audit
      destination: memory-audit

  # ── Hooks ───────────────────────────────────────────────────────────────
  - id: hook-continuous-learning
    displayName: Continuous learning activator
    description: Reminds to evaluate learnings on each prompt
    isRequired: true
    hookEvent: UserPromptSubmit
    hook:
      source: hooks/continuous-learning-activator.sh
      destination: continuous-learning-activator.sh

  - id: hook-sync-memories
    displayName: Sync memories hook
    description: Checks Ollama health and syncs docs-mcp-server library on session start
    dependencies: [ollama, docs-mcp-server, jq]
    hookEvent: SessionStart
    hookAsync: true
    hookTimeout: 120
    hookStatusMessage: "Indexing memories..."
    hook:
      source: hooks/sync-memories.sh
      destination: sync-memories.sh

  - id: hook-reindex-memories
    displayName: Reindex memories hook
    description: Reindexes docs-mcp-server library when memories have changed mid-session
    dependencies: [ollama, docs-mcp-server, jq]
    hookEvent: UserPromptSubmit
    hookAsync: true
    hookTimeout: 120
    hookStatusMessage: "Reindexing memories..."
    hook:
      source: hooks/sync-memories.sh
      destination: sync-memories.sh

  # ── Configuration ───────────────────────────────────────────────────────
  - id: settings
    displayName: Settings
    description: Disables built-in auto-memory in favor of continuous learning system
    settingsFile: config/settings.json

  - id: gitignore
    displayName: Global gitignore
    description: Ignores memory files from version control
    gitignore:
      - ".claude/.memories-last-indexed"

# ---------------------------------------------------------------------------
# Templates — CLAUDE.local.md sections
# ---------------------------------------------------------------------------
templates:
  - sectionIdentifier: continuous-learning
    placeholders:
      - __PROJECT_DIR_NAME__
    contentFile: templates/continuous-learning.md