From 44f51c34d1733b5419b70f8837f808eb0162fa3b Mon Sep 17 00:00:00 2001 From: Stephan Butler Date: Fri, 24 Oct 2025 12:18:32 +0200 Subject: [PATCH 1/4] feat(chart-validator): breaking change: dramatically reduces the size of the image --- chartvalidator/Dockerfile | 27 +- chartvalidator/checker/.gitignore | 7 + chartvalidator/checker/appsets.go | 173 +++++++ chartvalidator/checker/engine_app_checker.go | 160 ++++++ .../checker/engine_chart_rendering.go | 148 ++++++ .../checker/engine_chart_rendering_test.go | 68 +++ .../checker/engine_docker_validation.go | 228 +++++++++ .../checker/engine_docker_validation_test.go | 442 ++++++++++++++++ .../checker/engine_image_extraction.go | 369 ++++++++++++++ .../checker/engine_image_extraction_test.go | 480 ++++++++++++++++++ .../checker/engine_manifest_validation.go | 112 ++++ .../engine_manifest_validation_test.go | 124 +++++ chartvalidator/checker/exec_interface.go | 59 +++ chartvalidator/checker/exec_mock.go | 79 +++ chartvalidator/checker/go.mod | 13 + chartvalidator/checker/go.sum | 10 + chartvalidator/checker/main.go | 130 +++++ .../checker/test_data/configmap.yaml | 12 + .../checker/test_data/deployment.yaml | 67 +++ chartvalidator/checker/test_data/example.yaml | 123 +++++ chartvalidator/checker/test_data/service.yaml | 21 + chartvalidator/checker/types.go | 72 +++ chartvalidator/checker/utils.go | 131 +++++ chartvalidator/checker/utils_test.go | 161 ++++++ 24 files changed, 3206 insertions(+), 10 deletions(-) create mode 100644 chartvalidator/checker/.gitignore create mode 100644 chartvalidator/checker/appsets.go create mode 100644 chartvalidator/checker/engine_app_checker.go create mode 100644 chartvalidator/checker/engine_chart_rendering.go create mode 100644 chartvalidator/checker/engine_chart_rendering_test.go create mode 100644 chartvalidator/checker/engine_docker_validation.go create mode 100644 chartvalidator/checker/engine_docker_validation_test.go create mode 100644 chartvalidator/checker/engine_image_extraction.go create mode 100644 chartvalidator/checker/engine_image_extraction_test.go create mode 100644 chartvalidator/checker/engine_manifest_validation.go create mode 100644 chartvalidator/checker/engine_manifest_validation_test.go create mode 100644 chartvalidator/checker/exec_interface.go create mode 100644 chartvalidator/checker/exec_mock.go create mode 100644 chartvalidator/checker/go.mod create mode 100644 chartvalidator/checker/go.sum create mode 100644 chartvalidator/checker/main.go create mode 100644 chartvalidator/checker/test_data/configmap.yaml create mode 100644 chartvalidator/checker/test_data/deployment.yaml create mode 100644 chartvalidator/checker/test_data/example.yaml create mode 100644 chartvalidator/checker/test_data/service.yaml create mode 100644 chartvalidator/checker/types.go create mode 100644 chartvalidator/checker/utils.go create mode 100644 chartvalidator/checker/utils_test.go diff --git a/chartvalidator/Dockerfile b/chartvalidator/Dockerfile index 4a62ea5..80756cd 100644 --- a/chartvalidator/Dockerfile +++ b/chartvalidator/Dockerfile @@ -1,7 +1,14 @@ -FROM golang:1.25-alpine +FROM golang:1.25-alpine AS checkerbuild WORKDIR /root +COPY checker /root/checker +WORKDIR /root/checker +RUN go test . +RUN go build -o chart-checker . 
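+# The checker binary built above is copied into the slim runtime image defined below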
+ +FROM alpine:3.22 + RUN apk add --no-cache curl bash # Install python3 and pip @@ -24,16 +31,16 @@ RUN apk add --no-cache make # Install docker client RUN apk add --no-cache docker-cli -RUN mkdir /googlesdk -WORKDIR /googlesdk +# Install docker-credential-gcr for Google Artifact Registry authentication +# This is much lighter (~20MB) than full gcloud SDK (~1GB+) +RUN curl -fsSL "https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v2.1.26/docker-credential-gcr_linux_amd64-2.1.26.tar.gz" | tar xz -C /usr/local/bin docker-credential-gcr \ + && chmod +x /usr/local/bin/docker-credential-gcr -# As described https://cloud.google.com/sdk/docs/install -RUN curl -O https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-cli-linux-x86_64.tar.gz -RUN tar -xvf google-cloud-cli-linux-x86_64.tar.gz -RUN ./google-cloud-sdk/install.sh --quiet -RUN ./google-cloud-sdk/bin/gcloud components install kubectl +# Configure docker to use the GCR credential helper +RUN mkdir -p /root/.docker \ + && echo '{"credHelpers":{"gcr.io":"gcr","us-docker.pkg.dev":"gcr","europe-docker.pkg.dev":"gcr","asia-docker.pkg.dev":"gcr"}}' > /root/.docker/config.json -# Add gcloud bin to PATH -ENV PATH="/googlesdk/google-cloud-sdk/bin:${PATH}" +# Copy chart-checker from the builder stage +COPY --from=checkerbuild /root/checker/chart-checker /usr/local/bin/chart-checker WORKDIR /app \ No newline at end of file diff --git a/chartvalidator/checker/.gitignore b/chartvalidator/checker/.gitignore new file mode 100644 index 0000000..ac192d1 --- /dev/null +++ b/chartvalidator/checker/.gitignore @@ -0,0 +1,7 @@ +_out +helm_output +*.py +*.bin +manifests +imageLists +test_output \ No newline at end of file diff --git a/chartvalidator/checker/appsets.go b/chartvalidator/checker/appsets.go new file mode 100644 index 0000000..04477cf --- /dev/null +++ b/chartvalidator/checker/appsets.go @@ -0,0 +1,173 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "gopkg.in/yaml.v3" +) + +// findChartsInAppsets scans ApplicationSet files and extracts chart information +func findChartsInAppsets(envDir, selectedEnv string) ([]ChartRenderParams, error) { + const suffix = "appset.yaml" + var out []ChartRenderParams + + fmt.Println("Scanning environments in", envDir) + + if selectedEnv != "" { + envPath := filepath.Join(envDir, selectedEnv) + ok, err := existsDir(envPath) + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("environment %q not found in %s", selectedEnv, envDir) + } + ch, err := processEnvironment(selectedEnv, envPath, suffix) + if err != nil { + return nil, err + } + return ch, nil + } + + entries, err := os.ReadDir(envDir) + if err != nil { + return nil, err + } + for _, e := range entries { + if !e.IsDir() { + continue + } + envName := e.Name() + envPath := filepath.Join(envDir, envName) + ch, err := processEnvironment(envName, envPath, suffix) + if err != nil { + return nil, err + } + out = append(out, ch...) 
+ } + return out, nil +} + +// processEnvironment extracts charts from a single environment directory +func processEnvironment(envName, envPath, suffix string) ([]ChartRenderParams, error) { + appsetsPath := filepath.Join(envPath, "appsets") + ok, err := existsDir(appsetsPath) + if err != nil || !ok { + return []ChartRenderParams{}, err + } + + files, err := listAppsetFiles(appsetsPath, suffix) + if err != nil { + return nil, err + } + + var charts []ChartRenderParams + for _, f := range files { + data, err := os.ReadFile(f) + if err != nil { + return nil, err + } + var node any + if err := yaml.Unmarshal(data, &node); err != nil { + return nil, fmt.Errorf("failed to parse YAML %s: %w", f, err) + } + elems := extractElements(node) + for _, el := range elems { + charts = append(charts, extractChartInfo(el, envName)) + } + } + return charts, nil +} + +// listAppsetFiles returns all files ending with the given suffix in the directory +func listAppsetFiles(dir, suffix string) ([]string, error) { + ents, err := os.ReadDir(dir) + if err != nil { + return nil, err + } + var out []string + for _, e := range ents { + if e.IsDir() { + continue + } + name := e.Name() + if strings.HasSuffix(name, suffix) { + out = append(out, filepath.Join(dir, name)) + } + } + return out, nil +} + +// existsDir checks if a directory exists +func existsDir(path string) (bool, error) { + info, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + return info.IsDir(), nil +} + +// extractElements extracts the list elements from an ApplicationSet document +func extractElements(doc any) []map[string]any { + // Navigate: spec.generators[0].list.elements + m, ok := doc.(map[string]any) + if !ok { + return nil + } + spec, _ := m["spec"].(map[string]any) + if spec == nil { + return nil + } + gens, _ := spec["generators"].([]any) + if len(gens) == 0 { + return nil + } + gen0, _ := gens[0].(map[string]any) + if gen0 == nil { + return nil + } + lst, _ := gen0["list"].(map[string]any) + if lst == nil { + return nil + } + elems, _ := lst["elements"].([]any) + if len(elems) == 0 { + return nil + } + var out []map[string]any + for _, e := range elems { + if mm, ok := e.(map[string]any); ok { + out = append(out, mm) + } + } + return out +} + +// extractChartInfo extracts Chart information from an ApplicationSet element +func extractChartInfo(el map[string]any, env string) ChartRenderParams { + return ChartRenderParams{ + Env: env, + ChartName: str(el["chartName"]), + RepoURL: str(el["repoURL"]), + ChartVersion: str(el["chartVersion"]), + BaseValuesFile: srcPrefix + str(el["baseValuesFile"]), + ValuesOverride: srcPrefix + str(el["valuesOverride"]), + } +} + +// str converts any value to string, handling nil safely +func str(v any) string { + if v == nil { + return "" + } + if s, ok := v.(string); ok { + return s + } + return fmt.Sprintf("%v", v) +} \ No newline at end of file diff --git a/chartvalidator/checker/engine_app_checker.go b/chartvalidator/checker/engine_app_checker.go new file mode 100644 index 0000000..5e23b13 --- /dev/null +++ b/chartvalidator/checker/engine_app_checker.go @@ -0,0 +1,160 @@ +package main + +import ( + "context" + "fmt" + "sync" +) + +type AppCheckInstruction struct { + Chart ChartRenderParams +} + +type AppCheckResult struct { + Chart ChartRenderParams + Image string + Error error +} + +type AppCheckerEngine struct { + inputChan chan AppCheckInstruction + resultChan chan AppCheckResult + errorChan chan ErrorResult + + ChartRenderingEngine 
*ChartRenderingEngine + ManifestValidationEngine *ManifestValidationEngine + ImageExtractionEngine *ImageExtractionEngine + DockerValidationEngine *DockerImageValidationEngine + + context context.Context + executor CommandExecutor + + workerWaitGroup sync.WaitGroup + + name string +} + +func NewAppCheckerEngine(context context.Context, outputDir string) *AppCheckerEngine { + + errorChan := make(chan ErrorResult) + + cre := ChartRenderingEngine{ + inputChan: make(chan ChartRenderParams), + resultChan: make(chan RenderResult), + errorChan: errorChan, + outputDir: outputDir, + context: context, + executor: &RealCommandExecutor{}, + name: "ChartRenderer", + } + + mve := ManifestValidationEngine{ + inputChan: cre.resultChan, + resultChan: make(chan ManifestValidationResult), + errorChan: errorChan, + context: context, + executor: &RealCommandExecutor{}, + name: "ManifestValidator", + workerWaitGroup: sync.WaitGroup{}, + } + + iee := ImageExtractionEngine{ + inputChan: mve.resultChan, + outputChan: make(chan ImageExtractionResult), + errorChan: errorChan, + context: context, + name: "ImageExtractor", + workerWaitGroup: sync.WaitGroup{}, + } + + dve := DockerImageValidationEngine{ + inputChan: iee.outputChan, + outputChan: make(chan DockerImageValidationResult), + context: context, + executor: &RealCommandExecutor{}, + name: "DockerValidator", + cache: map[string]DockerImageValidationResult{}, + pending: map[string]*sync.WaitGroup{}, + cacheLock: sync.RWMutex{}, + workerWaitGroup: sync.WaitGroup{}, + } + + return &AppCheckerEngine{ + inputChan: make(chan AppCheckInstruction), + resultChan: make(chan AppCheckResult), + errorChan: make(chan ErrorResult), + + context: context, + executor: &RealCommandExecutor{}, + + ChartRenderingEngine: &cre, + ManifestValidationEngine: &mve, + ImageExtractionEngine: &iee, + DockerValidationEngine: &dve, + + name: "AppChecker", + } +} + +func (engine *AppCheckerEngine) allDoneWorker() { + engine.workerWaitGroup.Wait() + logEngineDebug(engine.name,-1,"all workers done, closing output channel") + close(engine.resultChan) +} + +func (engine *AppCheckerEngine) Start(workerCount int) { + + // Fire up the engines + engine.ChartRenderingEngine.Start(workerCount) + engine.ManifestValidationEngine.Start(workerCount) + engine.ImageExtractionEngine.Start(workerCount) + engine.DockerValidationEngine.Start(workerCount) + + // Pour the input instructions into the chart renderer + engine.workerWaitGroup.Add(1) + go engine.pumpAppCheckInstructionsToChartRenderer() + engine.workerWaitGroup.Add(1) + go engine.pumpOutputsToAppCheckResults() + + go engine.allDoneWorker() +} + +func (engine *AppCheckerEngine) pumpOutputsToAppCheckResults() { + defer engine.workerWaitGroup.Done() + for dockerResult := range engine.DockerValidationEngine.outputChan { + if dockerResult.Error != nil { + engine.resultChan <- AppCheckResult{ + Chart: dockerResult.Chart, + Image: dockerResult.Image, + Error: dockerResult.Error, + } + continue + } else { + var err error = nil + if !dockerResult.Exists { + err = fmt.Errorf("docker image does not exist: %s", dockerResult.Image) + } + engine.resultChan <- AppCheckResult{ + Chart: dockerResult.Chart, + Image: dockerResult.Image, + Error: err, + } + } + } + logEngineDebug(engine.name, -1, "docker validation output closed") +} + +func (engine *AppCheckerEngine) pumpAppCheckInstructionsToChartRenderer() { + defer engine.workerWaitGroup.Done() + for instruction := range engine.inputChan { + engine.ChartRenderingEngine.inputChan <- ChartRenderParams{ + Env: 
instruction.Chart.Env, + ChartName: instruction.Chart.ChartName, + RepoURL: instruction.Chart.RepoURL, + ChartVersion: instruction.Chart.ChartVersion, + BaseValuesFile: instruction.Chart.BaseValuesFile, + ValuesOverride: instruction.Chart.ValuesOverride, + } + } + close(engine.ChartRenderingEngine.inputChan) +} \ No newline at end of file diff --git a/chartvalidator/checker/engine_chart_rendering.go b/chartvalidator/checker/engine_chart_rendering.go new file mode 100644 index 0000000..2252701 --- /dev/null +++ b/chartvalidator/checker/engine_chart_rendering.go @@ -0,0 +1,148 @@ +package main + +import ( + "context" + "fmt" + "math/rand" + "os" + "path/filepath" + "strings" + "sync" +) + + +type ChartRenderingEngine struct { + inputChan chan ChartRenderParams + resultChan chan RenderResult + errorChan chan ErrorResult + + outputDir string + context context.Context + executor CommandExecutor + name string + workerWaitGroup sync.WaitGroup +} + +type RenderResult struct { + Chart ChartRenderParams + ManifestPath string +} + +func (engine *ChartRenderingEngine) Start(workerCount int) { + if err := recreateOutputDir(engine.outputDir); err != nil { + msg := fmt.Sprintf("failed to prepare output directory: %s", err.Error()) + logEngineWarning(engine.name, -1, msg) + panic("This should not happen") + } + + for i := 0; i < workerCount; i++ { + engine.workerWaitGroup.Add(1) + go func(workerId int) { + engine.worker(workerId) + }(i) + } + go engine.allDoneWorker() +} + +func (engine *ChartRenderingEngine) allDoneWorker() { + engine.workerWaitGroup.Wait() + logEngineDebug(engine.name,-1,"all workers done, closing output channel") + close(engine.resultChan) +} + +func (engine *ChartRenderingEngine) worker(workerId int) { + defer engine.workerWaitGroup.Done() + + for { + select { + case chart, ok := <-engine.inputChan: + if !ok { + logEngineDebug(engine.name, workerId, "input closed") + return + } + + result, err := engine.renderSingleChart(chart, workerId) + if err != nil { + engine.errorChan <- ErrorResult{Chart: chart, Error: err} + continue + } + engine.resultChan <- *result + case <-engine.context.Done(): + logEngineDebug(engine.name, workerId, "context done") + return + } + } +} + + +func (engine *ChartRenderingEngine) renderSingleChart(chart ChartRenderParams, workerId int) (*RenderResult, error) { + + if !engine.executor.FileExists(chart.BaseValuesFile) { + msg := fmt.Sprintf("base values file does not exist: %s", chart.BaseValuesFile) + logEngineWarning(engine.name, workerId, msg) + return nil, fmt.Errorf("base values file does not exist: %s", chart.BaseValuesFile) + } + if !engine.executor.FileExists(chart.ValuesOverride) { + msg := fmt.Sprintf("values override file does not exist: %s", chart.ValuesOverride) + logEngineWarning(engine.name, workerId, msg) + return nil, fmt.Errorf("values override file does not exist: %s", chart.ValuesOverride) + } + + args := []string{ + "template", chart.ChartName, + "--release-name", chart.ChartName, + "--repo", chart.RepoURL, + "-f", chart.BaseValuesFile, + "-f", chart.ValuesOverride, + "--version", chart.ChartVersion, + "--include-crds", + } + + logEngineDebug(engine.name, workerId, fmt.Sprintf("helm %s", strings.Join(args, " "))) + cmd := engine.executor.CommandContext(engine.context, "helm", args...) 
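+	// helm is invoked through the CommandExecutor interface rather than exec.Command directly, so the unit tests can substitute a mock executor.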
+ + // Set working directory to current directory so relative paths work + if wd, err := os.Getwd(); err == nil { + cmd.SetDir(wd) + } + + output, err := cmd.CombinedOutput() + if err != nil { + msg := fmt.Sprintf("helm command failed: %s\nOutput: %s", err.Error(), string(output)) + logEngineWarning(engine.name, workerId, msg) + return nil, fmt.Errorf("helm command failed: %w", err) + } + + logEngineDebug(engine.name, workerId, fmt.Sprintf("helm %s\t\tCOMPLETED", strings.Join(args, " "))) + + // Create output file path using release name (use absolute path for output) + absOutputDir, err := filepath.Abs(engine.outputDir) + if err != nil { + msg := fmt.Sprintf("failed to get absolute path for output dir: %s", err.Error()) + logEngineWarning(engine.name, workerId, msg) + return nil, fmt.Errorf("failed to get absolute path for output dir: %w", err) + } + + randStr := generateRandomString(6) + filename := fmt.Sprintf("%s_%s.yaml", chart.ChartName, randStr) + outputPath := filepath.Join(absOutputDir, filename) + + // Write rendered manifests to file + if err := os.WriteFile(outputPath, output, 0644); err != nil { + msg := fmt.Sprintf("failed to write rendered manifest to file: %s", err.Error()) + logEngineWarning(engine.name, workerId, msg) + return nil, fmt.Errorf("failed to write rendered manifest to file: %w", err) + } + + return &RenderResult{Chart: chart, ManifestPath: outputPath}, nil +} + +// Suffix the files just in case two charts end up having the same name +func generateRandomString(length int) string { + const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + b := make([]byte, length) + for i := range b { + b[i] = charset[rand.Int63()%int64(len(charset))] + } + return string(b) +} diff --git a/chartvalidator/checker/engine_chart_rendering_test.go b/chartvalidator/checker/engine_chart_rendering_test.go new file mode 100644 index 0000000..91a8da6 --- /dev/null +++ b/chartvalidator/checker/engine_chart_rendering_test.go @@ -0,0 +1,68 @@ +package main + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +// Helper function to create and start a chart rendering engine +func createEngine(mockExecutor *MockCommandExecutor, includeErrorChan bool) *ChartRenderingEngine { + engine := &ChartRenderingEngine{ + inputChan: make(chan ChartRenderParams), + resultChan: make(chan RenderResult), + outputDir: "test_output", + context: context.Background(), + executor: mockExecutor, + } + + if includeErrorChan { + engine.errorChan = make(chan ErrorResult) + } + + engine.Start(1) + return engine +} + +// Helper function to cleanup engine channels +func cleanupEngine(engine *ChartRenderingEngine) { + close(engine.inputChan) + engine.context.Done() +} + +func TestRenderBasics(t *testing.T) { + mockExecutor := createMockExecutor() + engine := createEngine(mockExecutor, false) + defer cleanupEngine(engine) + + testChart := createTestChart() + engine.inputChan <- testChart + + result := <-engine.resultChan + assertChartFieldsMatch(t, testChart, result.Chart) + + // Verify the command that was executed + expectedCommand := "helm template test-chart --release-name test-chart --repo https://example.com/charts -f values.yaml -f override.yaml --version 1.0.0 --include-crds" + actualCommand := mockExecutor.GetFullCommand() + assert.Equal(t, expectedCommand, actualCommand) +} + +func TestRenderBaseFileNotExist(t *testing.T) { + mockExecutor := createMockExecutor() + mockExecutor.FileExistsMap = map[string]bool{ + "values.yaml": false, + "override.yaml": true, + 
} + + engine := createEngine(mockExecutor, true) + defer cleanupEngine(engine) + + testChart := createTestChart() + engine.inputChan <- testChart + + errorResult := <-engine.errorChan + assert.Equal(t, errorResult.Chart.ChartName, testChart.ChartName) + assert.NotNil(t, errorResult.Error) + assert.Contains(t, errorResult.Error.Error(), "base values file does not exist") +} \ No newline at end of file diff --git a/chartvalidator/checker/engine_docker_validation.go b/chartvalidator/checker/engine_docker_validation.go new file mode 100644 index 0000000..47cc05c --- /dev/null +++ b/chartvalidator/checker/engine_docker_validation.go @@ -0,0 +1,228 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "io/fs" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + "sync" + "time" +) + +// DockerImageValidationResult represents the result of validating a single Docker image + + + +type DockerImageValidationEngine struct { + inputChan chan ImageExtractionResult + outputChan chan DockerImageValidationResult + + executor CommandExecutor + context context.Context + + cache map[string]DockerImageValidationResult + pending map[string]*sync.WaitGroup + cacheLock sync.RWMutex + + name string + + workerWaitGroup sync.WaitGroup +} + +func (engine *DockerImageValidationEngine) Start(workerCount int) { + for i := 0; i < workerCount; i++ { + engine.workerWaitGroup.Add(1) + go func(workerId int) { + engine.worker(workerId) + }(i) + } + go engine.allDoneWorker() +} + +func (engine *DockerImageValidationEngine) allDoneWorker() { + engine.workerWaitGroup.Wait() + logEngineDebug(engine.name,-1,"all workers done, closing output channel") + close(engine.outputChan) +} + +func (engine *DockerImageValidationEngine) worker(workerId int) { + defer engine.workerWaitGroup.Done() + + for { + select { + case input, ok := <-engine.inputChan: + if !ok { + logEngineDebug(engine.name, workerId, "input closed") + return + } + image := input.Image + + // If there is a result pending, then wait for it and return it + pending_result := engine.waitForPending(input.Chart, image, workerId) + if pending_result != nil { + engine.outputChan <- *pending_result + continue + } + + // If already cached, return that one + engine.cacheLock.RLock() + if result, found := engine.cache[image]; found { + engine.cacheLock.RUnlock() + engine.outputChan <- result + continue + } + engine.cacheLock.RUnlock() + + engine.cacheLock.Lock() + engine.pending[image] = &sync.WaitGroup{} + pendingWG := engine.pending[image] + pendingWG.Add(1) + engine.cacheLock.Unlock() + + result := engine.validateSingleDockerImage(input.Chart, image, workerId) + + engine.cacheLock.Lock() + engine.cache[image] = result + pendingWG.Done() + delete(engine.pending, image) + engine.cacheLock.Unlock() + engine.outputChan <- result + + case <-engine.context.Done(): + logEngineDebug(engine.name,workerId,"context done") + return + } + } +} + +// Should there already be a pending validation for the image, wait for it to complete and return the result +func (engine *DockerImageValidationEngine) waitForPending(chart ChartRenderParams, image string, workerId int) *DockerImageValidationResult { + engine.cacheLock.RLock() + if wg, found := engine.pending[image]; found { + engine.cacheLock.RUnlock() + logEngineDebug(engine.name, workerId, fmt.Sprintf("waiting for pending: %s", image)) + wg.Wait() + engine.cacheLock.RLock() + if result, found := engine.cache[image]; found { + engine.cacheLock.RUnlock() + logEngineDebug(engine.name, workerId, fmt.Sprintf("submitting %s result 
we were waiting for", image)) + return &DockerImageValidationResult{ + Image: image, + Exists: result.Exists, + Error: result.Error, + Chart: chart, + } + } + logEngineWarning(engine.name, workerId, fmt.Sprintf("even after waiting no result found for %s", image)) + engine.cacheLock.RUnlock() + return nil + } + engine.cacheLock.RUnlock() + return nil +} + +func (engine *DockerImageValidationEngine) validateSingleDockerImage(chart ChartRenderParams, image string, workerId int) DockerImageValidationResult { + ctx, cancel := context.WithTimeout(engine.context, 2*time.Minute) + defer cancel() + + args := []string{"manifest", "inspect", image} + cmd := engine.executor.CommandContext(ctx, "docker", args...) + + // Print the command being executed using interface methods + cmdStr := fmt.Sprintf("%s %s", filepath.Base(cmd.GetPath()), strings.Join(cmd.GetArgs()[1:], " ")) + logEngineDebug(engine.name, workerId, fmt.Sprintf("executing: %s", cmdStr)) + + err := cmd.Run() + + exists := err == nil + if err != nil { + logEngineWarning(engine.name, workerId, fmt.Sprintf("failed: %s", cmdStr)) + } else { + logEngineDebug(engine.name, workerId, fmt.Sprintf("completed: %s", cmdStr)) + } + + return DockerImageValidationResult{ + Image: image, + Exists: exists, + Error: err, + Chart: chart, + } + +} + +// findJSONFiles recursively finds all JSON files in the given directory +func findJSONFiles(dir string) ([]string, error) { + var jsonFiles []string + + err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + + if !d.IsDir() && strings.ToLower(filepath.Ext(path)) == ".json" { + jsonFiles = append(jsonFiles, path) + } + + return nil + }) + + return jsonFiles, err +} + +// extractAllImagesFromJSONFiles reads all JSON files and extracts Docker image names +func extractAllImagesFromJSONFiles(jsonFiles []string) ([]string, error) { + var allImages []string + + for _, jsonFile := range jsonFiles { + images, err := extractImagesFromJSONFile(jsonFile) + if err != nil { + return nil, fmt.Errorf("failed to extract images from %s: %w", jsonFile, err) + } + allImages = append(allImages, images...) 
+ } + + return allImages, nil +} + +// extractImagesFromJSONFile reads a single JSON file and extracts the Docker image array +func extractImagesFromJSONFile(jsonFile string) ([]string, error) { + content, err := os.ReadFile(jsonFile) + if err != nil { + return nil, fmt.Errorf("failed to read file: %w", err) + } + + var images []string + if err := json.Unmarshal(content, &images); err != nil { + return nil, fmt.Errorf("failed to unmarshal JSON: %w", err) + } + + return images, nil +} + +// deduplicateImages removes duplicate images while preserving order +func deduplicateImages(images []string) []string { + seen := make(map[string]bool) + var unique []string + + for _, image := range images { + if image != "" && !seen[image] { + seen[image] = true + unique = append(unique, image) + } + } + + // Sort for consistent output + sort.Strings(unique) + return unique +} + + +// createDockerManifestInspectCommand creates the docker command for validating an image +func createDockerManifestInspectCommand(image string) *exec.Cmd { + return exec.Command("docker", "manifest", "inspect", image) +} diff --git a/chartvalidator/checker/engine_docker_validation_test.go b/chartvalidator/checker/engine_docker_validation_test.go new file mode 100644 index 0000000..d64db16 --- /dev/null +++ b/chartvalidator/checker/engine_docker_validation_test.go @@ -0,0 +1,442 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +// Helper function to create a Docker validation engine +func createDockerValidationEngine(mockExecutor *MockCommandExecutor) *DockerImageValidationEngine { + return &DockerImageValidationEngine{ + inputChan: make(chan ImageExtractionResult), + outputChan: make(chan DockerImageValidationResult), + executor: mockExecutor, + context: createTestContext(), + cache: make(map[string]DockerImageValidationResult), + pending: make(map[string]*sync.WaitGroup), + name: "DockerImageValidationEngine", + } +} + +// Helper function to create test images slice +func createTestImages() []string { + return []string{ + "nginx:1.20", + "redis:6.2", + "nginx:1.20", + "nginx:1.20", + "nginx:1.20", + "nginx:1.20", + "redis:6.2", + "nginx:1.20", + "nginx:1.20", + "nginx:1.20", + "nginx:1.20", + "redis:6.2", + "nginx:1.20", + "nginx:1.20", + "nginx:1.20", + "nginx:1.20", + "redis:6.2", + "nginx:1.20", + "nginx:1.21", + "nginx:1.21", + "nginx:1.21", + "redis:6.2", + "nginx:1.20", + "nginx:1.20", + "nginx:1.20", + "nginx:1.20", + } +} + +// Helper function to send images to engine +func sendImagesToEngine(engine *DockerImageValidationEngine, images []string) { + go func() { + for _, img := range images { + engine.inputChan <- ImageExtractionResult{ + Image: img, + } + } + }() +} + +// Helper function to collect results from engine +func collectResults(engine *DockerImageValidationEngine, count int) map[string]DockerImageValidationResult { + resultStore := make(map[string]DockerImageValidationResult) + for i := 0; i < count; i++ { + result := <-engine.outputChan + resultStore[result.Image] = result + } + return resultStore +} + +// Helper function to create test files in directory +func createTestFiles(t *testing.T, tempDir string, files []string) { + for _, file := range files { + fullPath := filepath.Join(tempDir, file) + err := os.MkdirAll(filepath.Dir(fullPath), 0755) + if err != nil { + t.Fatalf("Failed to create directory: %v", err) + } + err = os.WriteFile(fullPath, []byte("{}"), 0644) + if err != nil { + 
t.Fatalf("Failed to create file: %v", err) + } + } +} + +// Helper function to create JSON file with content +func createJSONFile(t *testing.T, filePath string, content []string) { + jsonData, err := json.Marshal(content) + if err != nil { + t.Fatalf("Failed to marshal test data: %v", err) + } + err = os.WriteFile(filePath, jsonData, 0644) + if err != nil { + t.Fatalf("Failed to write JSON file: %v", err) + } +} + +func TestDockerImageValidationEngine(t *testing.T) { + mockExecutor := createMockExecutor() + engine := createDockerValidationEngine(mockExecutor) + engine.Start(1) + + img := "nginx:1.20" + go func(s string) { + engine.inputChan <- ImageExtractionResult{ + Image: s, + } + }(img) + + result := <-engine.outputChan + if result.Image != img { + t.Errorf("Expected image %s, got %s", img, result.Image) + } + if !result.Exists { + t.Errorf("Expected image %s to exist", img) + } + + assertCommandExecution(t, mockExecutor, "docker manifest inspect nginx:1.20") + engine.context.Done() +} + +func TestDockerImageValidationCache(t *testing.T) { + mockExecutor := createMockExecutorWithBehavior(func() error { + time.Sleep(100 * time.Millisecond) + return nil + }) + + engine := createDockerValidationEngine(mockExecutor) + engine.Start(2) + + images := createTestImages() + sendImagesToEngine(engine, images) + resultStore := collectResults(engine, len(images)) + + if len(resultStore) != 3 { + t.Errorf("Expected 3 unique results, got %d", len(resultStore)) + } + + engine.context.Done() +} + + +// TestFindJSONFiles tests finding JSON files in a directory +func TestFindJSONFiles(t *testing.T) { + tempDir := t.TempDir() + + jsonFiles := []string{ + "images1.json", + "images2.json", + "subdir/nested.json", + } + + nonJSONFiles := []string{ + "config.yaml", + "readme.txt", + "data.xml", + } + + allFiles := append(jsonFiles, nonJSONFiles...) 
+ createTestFiles(t, tempDir, allFiles) + + foundFiles, err := findJSONFiles(tempDir) + if err != nil { + t.Fatalf("findJSONFiles failed: %v", err) + } + + if len(foundFiles) != len(jsonFiles) { + t.Errorf("Expected %d JSON files, found %d", len(jsonFiles), len(foundFiles)) + } + + // Convert to relative paths for comparison + foundSet := make(map[string]bool) + for _, file := range foundFiles { + rel, err := filepath.Rel(tempDir, file) + if err != nil { + t.Fatalf("Failed to get relative path: %v", err) + } + foundSet[rel] = true + } + + // Check all expected JSON files are found + for _, expected := range jsonFiles { + if !foundSet[expected] { + t.Errorf("Expected JSON file %s not found", expected) + } + } +} + +// TestExtractImagesFromJSONFile tests extracting images from a single JSON file +func TestExtractImagesFromJSONFile(t *testing.T) { + tempDir := t.TempDir() + + tests := []struct { + name string + jsonContent []string + expectedImages []string + expectError bool + }{ + { + name: "valid JSON with images", + jsonContent: []string{"nginx:1.20", "redis:6.2", "postgres:13"}, + expectedImages: []string{"nginx:1.20", "redis:6.2", "postgres:13"}, + expectError: false, + }, + { + name: "empty JSON array", + jsonContent: []string{}, + expectedImages: []string{}, + expectError: false, + }, + { + name: "single image", + jsonContent: []string{"alpine:latest"}, + expectedImages: []string{"alpine:latest"}, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + jsonFile := filepath.Join(tempDir, tt.name+".json") + createJSONFile(t, jsonFile, tt.jsonContent) + + images, err := extractImagesFromJSONFile(jsonFile) + + if tt.expectError && err == nil { + t.Errorf("Expected error but got none") + } + if !tt.expectError && err != nil { + t.Errorf("Unexpected error: %v", err) + } + if !tt.expectError { + assertStringSlicesEqual(t, tt.expectedImages, images, "extracted images") + } + }) + } +} + +// TestExtractImagesFromJSONFileInvalidJSON tests handling of invalid JSON +func TestExtractImagesFromJSONFileInvalidJSON(t *testing.T) { + tempDir := t.TempDir() + + invalidJSON := `{"invalid": "json", "not": ["an", "array"]}` + jsonFile := filepath.Join(tempDir, "invalid.json") + err := os.WriteFile(jsonFile, []byte(invalidJSON), 0644) + if err != nil { + t.Fatalf("Failed to write invalid JSON file: %v", err) + } + + _, err = extractImagesFromJSONFile(jsonFile) + if err == nil { + t.Errorf("Expected error for invalid JSON, but got none") + } +} + +// TestExtractAllImagesFromJSONFiles tests extracting images from multiple JSON files +func TestExtractAllImagesFromJSONFiles(t *testing.T) { + tempDir := t.TempDir() + + testFiles := map[string][]string{ + "file1.json": {"nginx:1.20", "redis:6.2"}, + "file2.json": {"postgres:13", "alpine:latest"}, + "file3.json": {"node:16", "python:3.9"}, + } + + var allPaths []string + var expectedImages []string + + for filename, images := range testFiles { + jsonFile := filepath.Join(tempDir, filename) + createJSONFile(t, jsonFile, images) + allPaths = append(allPaths, jsonFile) + expectedImages = append(expectedImages, images...) 
+ } + + allImages, err := extractAllImagesFromJSONFiles(allPaths) + if err != nil { + t.Fatalf("extractAllImagesFromJSONFiles failed: %v", err) + } + + if len(allImages) != len(expectedImages) { + t.Errorf("Expected %d total images, got %d", len(expectedImages), len(allImages)) + } + + // Check all expected images are present (order might differ) + imageSet := make(map[string]bool) + for _, img := range allImages { + imageSet[img] = true + } + + for _, expected := range expectedImages { + if !imageSet[expected] { + t.Errorf("Expected image %s not found in results", expected) + } + } +} + +// TestDeduplicateImages tests image deduplication +func TestDeduplicateImages(t *testing.T) { + tests := []struct { + name string + input []string + expectedUnique []string + }{ + { + name: "no duplicates", + input: []string{"nginx:1.20", "redis:6.2", "postgres:13"}, + expectedUnique: []string{"nginx:1.20", "postgres:13", "redis:6.2"}, // sorted + }, + { + name: "with duplicates", + input: []string{"nginx:1.20", "redis:6.2", "nginx:1.20", "postgres:13", "redis:6.2"}, + expectedUnique: []string{"nginx:1.20", "postgres:13", "redis:6.2"}, // sorted and deduplicated + }, + { + name: "empty input", + input: []string{}, + expectedUnique: []string{}, + }, + { + name: "with empty strings", + input: []string{"nginx:1.20", "", "redis:6.2", ""}, + expectedUnique: []string{"nginx:1.20", "redis:6.2"}, // empty strings filtered out + }, + { + name: "all same", + input: []string{"nginx:1.20", "nginx:1.20", "nginx:1.20"}, + expectedUnique: []string{"nginx:1.20"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := deduplicateImages(tt.input) + assertStringSlicesEqual(t, tt.expectedUnique, result, "deduplicated images") + }) + } +} + +// TestCreateDockerManifestInspectCommand tests the docker command creation +func TestCreateDockerManifestInspectCommand(t *testing.T) { + tests := []struct { + name string + image string + expectedArgs []string + }{ + { + name: "simple image", + image: "nginx:1.20", + expectedArgs: []string{"manifest", "inspect", "nginx:1.20"}, + }, + { + name: "image with registry", + image: "registry.example.com/my-app:v1.0", + expectedArgs: []string{"manifest", "inspect", "registry.example.com/my-app:v1.0"}, + }, + { + name: "image with digest", + image: "nginx@sha256:abc123", + expectedArgs: []string{"manifest", "inspect", "nginx@sha256:abc123"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cmd := createDockerManifestInspectCommand(tt.image) + + if filepath.Base(cmd.Path) != "docker" { + t.Errorf("Expected docker command, got %s", cmd.Path) + } + + // cmd.Args[0] is the program name, cmd.Args[1:] are the actual arguments + actualArgs := cmd.Args[1:] + assertStringSlicesEqual(t, tt.expectedArgs, actualArgs, "docker command arguments") + }) + } +} + +// TestValidateSingleDockerImage tests the validation logic (without actually calling docker) +func TestValidateSingleDockerImage(t *testing.T) { + tests := []struct { + name string + image string + expectedImage string + }{ + { + name: "valid image name", + image: "nginx:1.20", + expectedImage: "nginx:1.20", + }, + { + name: "image with registry", + image: "gcr.io/my-project/my-app:latest", + expectedImage: "gcr.io/my-project/my-app:latest", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cmd := createDockerManifestInspectCommand(tt.image) + assert.NotNil(t, cmd, "command should not be nil") + assert.Equal(t, cmd.Args[0], "docker", "command should be 
docker") + assert.Equal(t, cmd.Args[1], "manifest", "command should be manifest") + assert.Equal(t, cmd.Args[2], "inspect", "command should be inspect") + if cmd.Args[len(cmd.Args)-1] != tt.expectedImage { + t.Errorf("Expected command to include image %s, got args %v", tt.expectedImage, cmd.Args) + } + }) + } +} + +func TestDockerValidationError(t *testing.T) { + mockExecutor := createMockExecutorWithBehavior(func() error { + return fmt.Errorf("mocked docker error") + }) + + engine := createDockerValidationEngine(mockExecutor) + engine.Start(1) + + img := "nonexistent:image" + go func(s string) { + engine.inputChan <- ImageExtractionResult{ + Image: s, + } + }(img) + + result := <-engine.outputChan + assert.Equal(t, result.Image, img) + assert.NotNil(t, result.Error) + assertCommandExecution(t, mockExecutor, "docker manifest inspect nonexistent:image") + engine.context.Done() +} \ No newline at end of file diff --git a/chartvalidator/checker/engine_image_extraction.go b/chartvalidator/checker/engine_image_extraction.go new file mode 100644 index 0000000..80afd47 --- /dev/null +++ b/chartvalidator/checker/engine_image_extraction.go @@ -0,0 +1,369 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "gopkg.in/yaml.v3" +) + +// Consumes manifest files from inputChan, extracts Docker images, and sends results to outputChan +type ImageExtractionEngine struct { + // Each string should be a path to a manifest file + inputChan chan ManifestValidationResult + outputChan chan ImageExtractionResult + errorChan chan ErrorResult + + context context.Context + workerWaitGroup sync.WaitGroup + name string +} + +func (engine *ImageExtractionEngine) Start(workerCount int) { + for i := 0; i < workerCount; i++ { + engine.workerWaitGroup.Add(1) + go func(workerId int) { + engine.worker(workerId) + }(i) + } + go engine.allDoneWorker() +} + +func (engine *ImageExtractionEngine) allDoneWorker() { + logEngineDebug(engine.name,-1, "waiting for workers to finish") + engine.workerWaitGroup.Wait() + logEngineDebug(engine.name,-1,"all workers done, closing output channel") + close(engine.outputChan) +} + +func (engine *ImageExtractionEngine) worker(workerId int) { + defer engine.workerWaitGroup.Done() + for { + select { + case input, ok := <-engine.inputChan: + if !ok { + logEngineDebug(engine.name, workerId, "input closed") + return + } + images, err := engine.extractImagesFromFile(input.ManifestFile, workerId) + if err != nil { + logEngineWarning(engine.name, workerId, fmt.Sprintf("failed to extract images from %s: %v", input.ManifestFile, err)) + engine.errorChan <- ErrorResult{ + Chart: input.Chart, + Error: fmt.Errorf("failed to extract images from %s: %w", input.ManifestFile, err), + } + continue + } else { + uniqueImages := removeDuplicates(images) + // Send each extracted image as a separate result for the next step + logEngineDebug(engine.name, workerId, fmt.Sprintf("extracted %d images from %s", len(uniqueImages), input.ManifestFile)) + for _, img := range uniqueImages { + engine.outputChan <- ImageExtractionResult{ + Chart: input.Chart, + ManifestFile: input.ManifestFile, + Image: img, + } + } + } + case <-engine.context.Done(): + logEngineDebug(engine.name, workerId, "context done") + return + } + } +} + +func (engine *ImageExtractionEngine) extractImagesFromFile(file string, workerId int) ([]string, error) { + // Read the manifest file + content, err := os.ReadFile(file) + if err != nil { + return nil, fmt.Errorf("failed to read file: %w", 
err) + } + + // Split content into multiple YAML documents (in case of multi-document files) + documents := strings.Split(string(content), "\n---\n") + var allImages []string + + for _, doc := range documents { + doc = strings.TrimSpace(doc) + if doc == "" { + continue + } + + // Extract images from this document + images, err := extractImageFromManifest(doc, workerId) + if err != nil { + // Don't fail the entire file for one bad document, just log and continue + logEngineWarning(engine.name, workerId, fmt.Sprintf("failed to extract images from document in %s: %v", file, err)) + continue + } + + allImages = append(allImages, images...) + } + + return allImages, nil +} + + +// extractDockerImages extracts Docker images from all manifest files in the specified directory +// and saves the results as JSON files in the output directory +func extractDockerImages(manifestDir, outputDir string, workerId int) error { + // Check if the source directory exists + if _, err := os.Stat(manifestDir); os.IsNotExist(err) { + return fmt.Errorf("directory %s does not exist", manifestDir) + } + + // Remove and recreate output directory + if err := recreateOutputDir(outputDir); err != nil { + return fmt.Errorf("failed to prepare output directory: %w", err) + } + + // Find all YAML files in the directory + yamlFiles, err := findYAMLFiles(manifestDir) + if err != nil { + return fmt.Errorf("failed to find YAML files in %s: %w", manifestDir, err) + } + + if len(yamlFiles) == 0 { + logEngineWarning("ImageExtractor", -1, fmt.Sprintf("No YAML files found in %s", manifestDir)) + return nil + } + + logEngineDebug("ImageExtractor", -1, fmt.Sprintf("Extracting Docker images from %d YAML files in %s", len(yamlFiles), manifestDir)) + + for _, yamlFile := range yamlFiles { + if err := extractImagesFromFile(yamlFile, manifestDir, outputDir, workerId); err != nil { + logEngineWarning("ImageExtractor", -1, fmt.Sprintf("failed to extract images from %s: %v", yamlFile, err)) + continue + } + } + + logEngineDebug("ImageExtractor", -1, fmt.Sprintf("Docker image extraction complete. JSON files written to %s/", outputDir)) + return nil +} + +// extractImagesFromFile extracts Docker images from a single manifest file and saves to JSON +func extractImagesFromFile(yamlFile, manifestDir, outputDir string, workerId int) error { + // Read the manifest file + content, err := os.ReadFile(yamlFile) + if err != nil { + return fmt.Errorf("failed to read file: %w", err) + } + + // Split content into multiple YAML documents (in case of multi-document files) + documents := strings.Split(string(content), "\n---\n") + var allImages []string + + for _, doc := range documents { + doc = strings.TrimSpace(doc) + if doc == "" { + continue + } + + // Extract images from this document + images, err := extractImageFromManifest(doc, workerId) + if err != nil { + // Don't fail the entire file for one bad document, just log and continue + logEngineWarning("ImageExtractor", workerId, fmt.Sprintf("failed to extract images from document in %s: %v", yamlFile, err)) + continue + } + + allImages = append(allImages, images...) 
+ } + + // Remove duplicates from the image list + uniqueImages := removeDuplicates(allImages) + + // Create output file name based on manifest file name + relPath, err := filepath.Rel(manifestDir, yamlFile) + if err != nil { + return fmt.Errorf("failed to get relative path: %w", err) + } + + // Replace file extension with .json and replace path separators with underscores + jsonFileName := strings.ReplaceAll(relPath, string(filepath.Separator), "_") + jsonFileName = strings.TrimSuffix(jsonFileName, filepath.Ext(jsonFileName)) + ".json" + outputPath := filepath.Join(outputDir, jsonFileName) + + // Create JSON output + jsonData, err := json.MarshalIndent(uniqueImages, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal JSON: %w", err) + } + + // Write JSON file + if err := os.WriteFile(outputPath, jsonData, 0644); err != nil { + return fmt.Errorf("failed to write JSON file %s: %w", outputPath, err) + } + + logEngineDebug("ImageExtractor", -1, fmt.Sprintf("Extracted %d unique images from %s -> %s", len(uniqueImages), relPath, jsonFileName)) + return nil +} + + +func extractImagesFromDeployment(manifest map[string]interface{}) ([]string, error) { + // Validate this is a Deployment + kind, ok := manifest["kind"].(string) + if !ok || kind != "Deployment" { + return nil, fmt.Errorf("not a Deployment manifest") + } + + // Extract the pod section and use extractImagesFromPod to do the work + spec, ok := manifest["spec"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("missing spec in Deployment") + } + template, ok := spec["template"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("missing template in Deployment spec") + } + _, ok = template["spec"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("missing pod spec in Deployment template") + } + + return extractImagesFromPod(template) +} + +func extractImagesFromDaemonSet(manifest map[string]interface{}) ([]string, error) { + // Validate this is a DaemonSet + kind, ok := manifest["kind"].(string) + if !ok || kind != "DaemonSet" { + return nil, fmt.Errorf("not a DaemonSet manifest") + } + + // Extract the pod section and use extractImagesFromPod to do the work + spec, ok := manifest["spec"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("missing spec in DaemonSet") + } + template, ok := spec["template"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("missing template in DaemonSet spec") + } + _, ok = template["spec"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("missing pod spec in DaemonSet template") + } + + return extractImagesFromPod(template) +} + +func extractImagesFromStatefulSet(manifest map[string]interface{}) ([]string, error) { + // Validate this is a StatefulSet + kind, ok := manifest["kind"].(string) + if !ok || kind != "StatefulSet" { + return nil, fmt.Errorf("not a StatefulSet manifest") + } + + // Extract the pod section and use extractImagesFromPod to do the work + spec, ok := manifest["spec"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("missing spec in StatefulSet") + } + template, ok := spec["template"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("missing template in StatefulSet spec") + } + _, ok = template["spec"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("missing pod spec in StatefulSet template") + } + + return extractImagesFromPod(template) +} + +func extractImagesFromPod(manifest map[string]interface{}) ([]string, error) { + images := []string{} + + spec, ok := 
manifest["spec"].(map[string]interface{}) + if !ok { + return images, nil // No spec found + } + + // Check containers + if containers, ok := spec["containers"].([]interface{}); ok { + for _, c := range containers { + if cMap, ok := c.(map[string]interface{}); ok { + if img, ok := cMap["image"].(string); ok { + images = append(images, img) + } + } + } + } + + // Check initContainers + if initContainers, ok := spec["initContainers"].([]interface{}); ok { + for _, c := range initContainers { + if cMap, ok := c.(map[string]interface{}); ok { + if img, ok := cMap["image"].(string); ok { + images = append(images, img) + } + } + } + } + + return images, nil +} + + +// Extracts all of the docker images references from a given Kubernetes manifest. +// This function makes the assumption that only a single manifest is provided at +// a time, and that it is a Pod or Pod-like object (e.g. Deployment, DaemonSet). +func extractImageFromManifest(manifest string, workerId int) ([]string, error) { + imagesFound := []string{} + + // Parse the YAML manifest into a generic map. + var doc map[string]interface{} + if err := yaml.Unmarshal([]byte(manifest), &doc); err != nil { + return imagesFound, fmt.Errorf("failed to parse YAML: %w", err) + } + + kind, ok := doc["kind"].(string) + if !ok { + return imagesFound, fmt.Errorf("manifest missing 'kind' field") + } + + logEngineDebug("ImageExtractor", workerId, fmt.Sprintf("Inspecting %s %s", kind, fmt.Sprint(doc["metadata"].(map[string]interface{})["name"]))) + + switch kind { + case "Pod": + + images, err := extractImagesFromPod(doc) + if err != nil { + return imagesFound, err + } + imagesFound = append(imagesFound, images...) + case "Deployment": + images, err := extractImagesFromDeployment(doc) + if err != nil { + return imagesFound, err + } + imagesFound = append(imagesFound, images...) + case "DaemonSet": + images, err := extractImagesFromDaemonSet(doc) + if err != nil { + return imagesFound, err + } + imagesFound = append(imagesFound, images...) + + case "StatefulSet": + images, err := extractImagesFromStatefulSet(doc) + if err != nil { + return imagesFound, err + } + imagesFound = append(imagesFound, images...) + + default: + // For other kinds, we currently do not extract images. 
+ logEngineDebug("ImageExtractor", workerId, fmt.Sprintf("Skipping image extraction for %s %s", kind, fmt.Sprint(doc["metadata"].(map[string]interface{})["name"]))) + return imagesFound, nil + } + + return imagesFound, nil + +} diff --git a/chartvalidator/checker/engine_image_extraction_test.go b/chartvalidator/checker/engine_image_extraction_test.go new file mode 100644 index 0000000..e714f6e --- /dev/null +++ b/chartvalidator/checker/engine_image_extraction_test.go @@ -0,0 +1,480 @@ +package main + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +// Sample manifests for testing +var sampleManifests = map[string]string{ + "pod_sample": ` +apiVersion: v1 +kind: Pod +metadata: + name: sample-pod +spec: + containers: + - name: sample-container + image: nginx:1.14.2 +`, + "deployment_sample": ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sample-deployment +spec: + replicas: 2 + selector: + matchLabels: + app: sample-app + template: + metadata: + labels: + app: sample-app + spec: + initContainers: + - name: init-sample + image: busybox:1.28 + command: ['sh', '-c', 'echo Init Container'] + containers: + - name: sample-container + image: nginx:1.14.2 + - name: another-container + image: redis:6.0 +`, + "daemonset_sample": ` +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: sample-daemonset +spec: + selector: + matchLabels: + app: sample-daemonset + template: + metadata: + labels: + app: sample-daemonset + spec: + containers: + - name: sample-container + image: nginx:1.14.2 + - name: another-container + image: redis:6.0 +`, + "statefulset_sample": ` +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: sample-statefulset +spec: + serviceName: "sample-service" + replicas: 3 + selector: + matchLabels: + app: sample-app + template: + metadata: + labels: + app: sample-app + spec: + containers: + - name: sample-container + image: nginx:1.14.2 + - name: another-container + image: redis:6.0 +`, +} + +// Helper function to get expected images for each manifest type +func getExpectedImages(manifestType string) map[string]bool { + switch manifestType { + case "pod_sample": + return map[string]bool{"nginx:1.14.2": true} + case "deployment_sample": + return map[string]bool{ + "nginx:1.14.2": true, + "redis:6.0": true, + "busybox:1.28": true, + } + case "daemonset_sample", "statefulset_sample": + return map[string]bool{ + "nginx:1.14.2": true, + "redis:6.0": true, + } + default: + return map[string]bool{} + } +} + +// Helper function to process engine with manifest +func processEngineWithManifest(t *testing.T, engine *ImageExtractionEngine, manifestPath string) []ImageExtractionResult { + input := ManifestValidationResult{ + ManifestFile: manifestPath, + } + + engine.inputChan <- input + close(engine.inputChan) + + return collectImageExtractionResults(engine) +} + + +func TestSingleImageExtraction(t *testing.T) { + verboseLogging = true + engine := createImageExtractionEngine() + engine.Start(1) + + tempDir := t.TempDir() + manifestPath := createTempManifestFile(t, tempDir, "test-deployment.yaml", sampleManifests["deployment_sample"]) + + results := processEngineWithManifest(t, engine, manifestPath) + + expectedImages := getExpectedImages("deployment_sample") + actualImages := extractImageNames(results) + + assertImageSetMatches(t, expectedImages, actualImages, "deployment_sample") + +} + +func TestImageExtractionEngine(t *testing.T) { + verboseLogging = true + + for name, manifest := range sampleManifests { + t.Run(name, 
func(t *testing.T) { + engine := createImageExtractionEngine() + engine.Start(1) + + tempDir := t.TempDir() + manifestPath := createTempManifestFile(t, tempDir, name+".yaml", manifest) + + results := processEngineWithManifest(t, engine, manifestPath) + actualImages := extractImageNames(results) + expectedImages := getExpectedImages(name) + + assertImageSetMatches(t, expectedImages, actualImages, name) + }) + } +} + + +func TestExtractImageFromManifest(t *testing.T) { + tests := []struct { + name string + manifestType string + expectedImages map[string]bool + }{ + { + name: "pod", + manifestType: "pod_sample", + expectedImages: map[string]bool{"nginx:1.14.2": true}, + }, + { + name: "deployment", + manifestType: "deployment_sample", + expectedImages: map[string]bool{ + "nginx:1.14.2": true, + "redis:6.0": true, + "busybox:1.28": true, + }, + }, + { + name: "daemonset", + manifestType: "daemonset_sample", + expectedImages: map[string]bool{ + "nginx:1.14.2": true, + "redis:6.0": true, + }, + }, + { + name: "statefulset", + manifestType: "statefulset_sample", + expectedImages: map[string]bool{ + "nginx:1.14.2": true, + "redis:6.0": true, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + images, err := extractImageFromManifest(sampleManifests[tt.manifestType], 0) + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + + assertImageSetMatches(t, tt.expectedImages, images, tt.name) + }) + } +} + +func TestImageCheckStruct(t *testing.T) { + testChart := createTestChart() + + imgCheck := &imageCheck{ + Image: "alpine:latest", + Chart: testChart, + } + + // Test field assignments + assert.Equal(t, "alpine:latest", imgCheck.Image) + assert.Equal(t, "test-chart", imgCheck.Chart.ChartName) + assert.Equal(t, "development", imgCheck.Chart.Env) + + // Test that Present and Error fields can be set + imgCheck.Present = true + imgCheck.Error = nil + + assert.True(t, imgCheck.Present) + assert.Nil(t, imgCheck.Error) +} + + +func TestExtractImagesFromFile(t *testing.T) { + tempDir := t.TempDir() + manifestDir := filepath.Join(tempDir, "manifests") + outputDir := filepath.Join(tempDir, "output") + + createTestFiles(t, tempDir, []string{ + "manifests/subdir/.keep", // Create the subdirectory + "output/.keep", // Create the output directory + }) + + manifestContent := `apiVersion: apps/v1 +kind: Deployment +metadata: + name: duplicate-test +spec: + template: + spec: + containers: + - name: app1 + image: nginx:1.20 + - name: app2 + image: nginx:1.20 + - name: app3 + image: redis:6.2` + + manifestFile := createTempManifestFile(t, manifestDir, "subdir/duplicate.yaml", manifestContent) + + err := extractImagesFromFile(manifestFile, manifestDir, outputDir, 0) + assert.NoError(t, err) + + // Verify output file with underscore naming + expectedFileName := "subdir_duplicate.json" + outputFile := filepath.Join(outputDir, expectedFileName) + assert.FileExists(t, outputFile) + + // Read and verify content + jsonData, err := os.ReadFile(outputFile) + assert.NoError(t, err) + + var images []string + err = json.Unmarshal(jsonData, &images) + assert.NoError(t, err) + + // Should have only unique images + expectedImages := []string{"nginx:1.20", "redis:6.2"} + assert.Equal(t, len(expectedImages), len(images)) +} + +// TestRemoveDuplicates tests the removeDuplicates helper function +func TestRemoveDuplicates(t *testing.T) { + tests := []struct { + name string + input []string + expected []string + }{ + { + name: "no duplicates", + input: []string{"a", "b", "c"}, + expected: 
[]string{"a", "b", "c"}, + }, + { + name: "with duplicates", + input: []string{"a", "b", "a", "c", "b"}, + expected: []string{"a", "b", "c"}, + }, + { + name: "empty slice", + input: []string{}, + expected: []string{}, + }, + { + name: "all same", + input: []string{"a", "a", "a"}, + expected: []string{"a"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := removeDuplicates(tt.input) + assertStringSlicesMatch(t, tt.expected, result, tt.name) + }) + } +} + + +// TestExtractImagesFromFile tests the extractImagesFromFi +// TestFindYAMLFiles tests the findYAMLFiles helper function +func TestFindYAMLFiles(t *testing.T) { + tempDir := t.TempDir() + + yamlFiles := []string{ + "test1.yaml", + "test2.yml", + "subdir/nested.yaml", + } + + nonYamlFiles := []string{ + "test.txt", + "config.json", + } + + allFiles := append(yamlFiles, nonYamlFiles...) + createTestFiles(t, tempDir, allFiles) + + foundFiles, err := findYAMLFiles(tempDir) + assert.NoError(t, err) + + assert.Equal(t, len(yamlFiles), len(foundFiles), "Expected correct number of YAML files") + + // Convert to relative paths for comparison + var relativeFound []string + for _, file := range foundFiles { + rel, err := filepath.Rel(tempDir, file) + assert.NoError(t, err) + relativeFound = append(relativeFound, rel) + } + + // Check that all expected YAML files are found + foundSet := make(map[string]bool) + for _, file := range relativeFound { + foundSet[file] = true + } + + for _, expected := range yamlFiles { + assert.True(t, foundSet[expected], "Expected YAML file %s not found", expected) + } +} + + +// TestExtractDockerImagesCommand tests the extract-docker-images command functionality +func TestExtractDockerImagesCommand(t *testing.T) { + tempDir := t.TempDir() + manifestDir := filepath.Join(tempDir, "manifests") + outputDir := filepath.Join(tempDir, "output") + + err := os.MkdirAll(manifestDir, 0755) + assert.NoError(t, err) + + manifestContent := `apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-app +spec: + template: + spec: + containers: + - name: app + image: nginx:1.20 + - name: sidecar + image: busybox:latest +--- +apiVersion: v1 +kind: Pod +metadata: + name: test-pod +spec: + containers: + - name: main + image: alpine:3.14 + initContainers: + - name: init + image: nginx:1.20` + + createTempManifestFile(t, manifestDir, "test-deployment.yaml", manifestContent) + + err = extractDockerImages(manifestDir, outputDir, 0) + assert.NoError(t, err) + + // Verify output directory was created + assert.DirExists(t, outputDir) + + // Verify JSON file was created + jsonFile := filepath.Join(outputDir, "test-deployment.json") + assert.FileExists(t, jsonFile) + + // Read and verify JSON content + jsonData, err := os.ReadFile(jsonFile) + assert.NoError(t, err) + + var images []string + err = json.Unmarshal(jsonData, &images) + assert.NoError(t, err) + + // Verify expected images (should be deduplicated) + expectedImages := []string{"nginx:1.20", "busybox:latest", "alpine:3.14"} + assert.Equal(t, len(expectedImages), len(images)) + + // Check each expected image is present + imageSet := make(map[string]bool) + for _, img := range images { + imageSet[img] = true + } + + for _, expected := range expectedImages { + assert.True(t, imageSet[expected], "Expected image %s not found in output", expected) + } +} + + +func TestDockerManifestCommand(t *testing.T) { + tests := []struct { + name string + image string + expectedCmd []string + }{ + { + name: "simple image", + image: "alpine:latest", + expectedCmd: 
[]string{"docker", "manifest", "inspect", "alpine:latest"}, + }, + { + name: "nginx image", + image: "nginx:1.21", + expectedCmd: []string{"docker", "manifest", "inspect", "nginx:1.21"}, + }, + { + name: "redis image", + image: "redis:6.2", + expectedCmd: []string{"docker", "manifest", "inspect", "redis:6.2"}, + }, + { + name: "registry with path", + image: "ghcr.io/example/app:v1.0.0", + expectedCmd: []string{"docker", "manifest", "inspect", "ghcr.io/example/app:v1.0.0"}, + }, + { + name: "docker hub with path", + image: "docker.io/library/postgres:13", + expectedCmd: []string{"docker", "manifest", "inspect", "docker.io/library/postgres:13"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Validate command construction logic + cmd := tt.expectedCmd + assert.Equal(t, "docker", cmd[0], "Expected first arg to be 'docker'") + assert.Equal(t, "manifest", cmd[1], "Expected second arg to be 'manifest'") + assert.Equal(t, "inspect", cmd[2], "Expected third arg to be 'inspect'") + assert.Equal(t, tt.image, cmd[3], "Expected fourth arg to be the image name") + }) + } +} diff --git a/chartvalidator/checker/engine_manifest_validation.go b/chartvalidator/checker/engine_manifest_validation.go new file mode 100644 index 0000000..654ece0 --- /dev/null +++ b/chartvalidator/checker/engine_manifest_validation.go @@ -0,0 +1,112 @@ +package main + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + "sync" +) + + + + +type ManifestValidationResult struct { + ManifestFile string + Chart ChartRenderParams + Error error +} + +type ManifestValidationEngine struct { + inputChan chan RenderResult + resultChan chan ManifestValidationResult + errorChan chan ErrorResult + + context context.Context + executor CommandExecutor + name string + workerWaitGroup sync.WaitGroup +} + +func (engine *ManifestValidationEngine) Start(workerCount int) { + for i := 0; i < workerCount; i++ { + engine.workerWaitGroup.Add(1) + go func(workerId int) { + engine.worker(workerId) + }(i) + } + go engine.allDoneWorker() +} + +func (engine *ManifestValidationEngine) allDoneWorker() { + engine.workerWaitGroup.Wait() + logEngineDebug(engine.name,-1,"all workers done, closing output channel") + close(engine.resultChan) +} + +func (engine *ManifestValidationEngine) worker(workerId int) { + defer engine.workerWaitGroup.Done() + for { + select { + case input, ok := <-engine.inputChan: + if !ok { + logEngineDebug(engine.name, workerId, "input closed") + return + } + result, err := engine.validateManifest(input.Chart,input.ManifestPath, workerId) + if err != nil { + engine.errorChan <- ErrorResult{ + Chart: input.Chart, + Error: fmt.Errorf("failed to validate manifest %s: %w", input.ManifestPath, err), + } + continue + } else { + engine.resultChan <- *result + } + + case <-engine.context.Done(): + logEngineDebug(engine.name, workerId, "context done") + return + } + } +} + +func (engine *ManifestValidationEngine) validateManifest(chart ChartRenderParams, manifestFile string, workerId int) (*ManifestValidationResult, error) { + + if _, err := os.Stat(manifestFile); os.IsNotExist(err) { + msg := fmt.Sprintf("manifest file does not exist: %s", manifestFile) + logEngineWarning(engine.name, workerId, msg) + return nil, fmt.Errorf("manifest file does not exist: %s", manifestFile) + } + // Build kubeconform command + args := []string{ + "-strict", + "-summary", + "-schema-location", "default", + "-schema-location", 
"https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json", + "-schema-location", "ci/schemas/{{ .ResourceKind }}_{{ .ResourceAPIVersion }}.json", + "-verbose", + "-exit-on-error", + manifestFile, + } + + cmd := engine.executor.CommandContext(engine.context, + "kubeconform", args... + ) + cmdStr := fmt.Sprintf("%s %s", filepath.Base(cmd.GetPath()), strings.Join(args, " ")) + logEngineDebug(engine.name, workerId, fmt.Sprintf("executing: %s", cmdStr)) + + if err := cmd.Run(); err != nil { + msg := fmt.Sprintf("kubeconform command failed: %s", err.Error()) + logEngineWarning(engine.name, workerId, msg) + return nil, fmt.Errorf("kubeconform command failed: %w", err) + } + + logEngineDebug(engine.name, workerId, fmt.Sprintf("succeeded: %s", cmdStr)) + return &ManifestValidationResult{ + ManifestFile: manifestFile, + Error: nil, + Chart: chart, + }, nil +} diff --git a/chartvalidator/checker/engine_manifest_validation_test.go b/chartvalidator/checker/engine_manifest_validation_test.go new file mode 100644 index 0000000..1f47cba --- /dev/null +++ b/chartvalidator/checker/engine_manifest_validation_test.go @@ -0,0 +1,124 @@ +package main + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestManifestValidationEngine(t *testing.T) { + mockExecutor := createManifestValidationMockExecutor() + engine := createManifestValidationEngine(mockExecutor) + engine.Start(1) + + testManifestFile := "test_data/example.yaml" + sendRenderResultToEngine(engine, testManifestFile) + + result := <-engine.resultChan + + // Verify no error occurred + assert.NoError(t, result.Error, "Expected no error during manifest validation") + + // Verify manifest file path is correct + assert.Equal(t, testManifestFile, result.ManifestFile, "Expected correct manifest file path") + + // Verify the command that was executed + expectedCommand := "kubeconform -strict -summary -schema-location default -schema-location https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json -schema-location ci/schemas/{{ .ResourceKind }}_{{ .ResourceAPIVersion }}.json -verbose -exit-on-error test_data/example.yaml" + assertCommandExecution(t, mockExecutor, expectedCommand) + + close(engine.inputChan) +} + +func TestManifestValidationEngineMultipleFiles(t *testing.T) { + verboseLogging = true + + testCases := []struct { + name string + manifestPath string + }{ + { + name: "deployment manifest", + manifestPath: "test_data/deployment.yaml", + }, + { + name: "service manifest", + manifestPath: "test_data/service.yaml", + }, + { + name: "configmap manifest", + manifestPath: "test_data/configmap.yaml", + }, + { + name: "deployment manifest2", + manifestPath: "test_data/deployment.yaml", + }, + { + name: "service manifest2", + manifestPath: "test_data/service.yaml", + }, + { + name: "configmap manifest2", + manifestPath: "test_data/configmap.yaml", + }, + } + + mockExecutor := createManifestValidationMockExecutor() + engine := createManifestValidationEngine(mockExecutor) + engine.Start(2) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + sendRenderResultToEngine(engine, tc.manifestPath) + + var result ManifestValidationResult + select { + case result = <-engine.resultChan: + t.Log("ok") + case errResult := <-engine.errorChan: + t.Fatalf("Expected no error for manifest %s, got error: %v", tc.manifestPath, errResult.Error) + } + + // Verify no error occurred + assert.NoError(t, 
result.Error, "Expected no error during manifest validation") + + // Verify manifest file path is correct + assert.Equal(t, tc.manifestPath, result.ManifestFile, "Expected correct manifest file path") + + // Verify command contains the manifest path + actualCommand := mockExecutor.GetFullCommand() + assert.Contains(t, actualCommand, tc.manifestPath, "Expected command to contain manifest path") + + }) + } + close(engine.inputChan) + engine.workerWaitGroup.Wait() +} + +func TestManifestValidationEngineWithError(t *testing.T) { + // Create mock executor that returns an error + mockExecutor := createMockExecutorWithBehavior(func() error { + return assert.AnError + }) + mockExecutor.Output = []byte("validation failed") + + engine := createManifestValidationEngine(mockExecutor) + engine.Start(1) + + testManifestFile := "test_data/invalid.yaml" + sendRenderResultToEngine(engine, testManifestFile) + + // Should receive an error result + select { + case result := <-engine.resultChan: + // If we get a result, it should have an error + assert.Error(t, result.Error, "Expected an error for invalid manifest") + assert.Equal(t, testManifestFile, result.ManifestFile, "Expected correct manifest file path even with error") + case errorResult := <-engine.errorChan: + // Or we might get an error result + assert.Error(t, errorResult.Error, "Expected an error for invalid manifest") + } + + close(engine.inputChan) + engine.workerWaitGroup.Wait() +} \ No newline at end of file diff --git a/chartvalidator/checker/exec_interface.go b/chartvalidator/checker/exec_interface.go new file mode 100644 index 0000000..f4a3350 --- /dev/null +++ b/chartvalidator/checker/exec_interface.go @@ -0,0 +1,59 @@ +package main + +import ( + "context" + "os" + "os/exec" +) + +// CommandExecutor interface allows for mocking exec.Command +type CommandExecutor interface { + CommandContext(ctx context.Context, name string, args ...string) Command + FileExists(path string) bool +} + +// Command interface wraps exec.Cmd for testing +type Command interface { + SetDir(dir string) + CombinedOutput() ([]byte, error) + Run() error + GetPath() string + GetArgs() []string +} + +// RealCommandExecutor implements CommandExecutor using the real exec package +type RealCommandExecutor struct{} + +func (r *RealCommandExecutor) CommandContext(ctx context.Context, name string, args ...string) Command { + return &RealCommand{cmd: exec.CommandContext(ctx, name, args...)} +} + +// RealCommand wraps exec.Cmd +type RealCommand struct { + cmd *exec.Cmd +} + +func (r *RealCommand) SetDir(dir string) { + r.cmd.Dir = dir +} + +func (r *RealCommand) CombinedOutput() ([]byte, error) { + return r.cmd.CombinedOutput() +} + +func (r *RealCommand) Run() error { + return r.cmd.Run() +} + +func (r *RealCommand) GetPath() string { + return r.cmd.Path +} + +func (r *RealCommand) GetArgs() []string { + return r.cmd.Args +} + +func (r *RealCommandExecutor) FileExists(path string) bool { + _, err := os.Stat(path) + return !os.IsNotExist(err) +} \ No newline at end of file diff --git a/chartvalidator/checker/exec_mock.go b/chartvalidator/checker/exec_mock.go new file mode 100644 index 0000000..e1bcc97 --- /dev/null +++ b/chartvalidator/checker/exec_mock.go @@ -0,0 +1,79 @@ +package main + +import ( + "context" + "strings" +) + +// MockCommandExecutor captures command execution for testing +type MockCommandExecutor struct { + LastCommand string + LastArgs []string + Output []byte + Error error + BehaviorOnRun func() error + FileExistsMap map[string]bool +} + +func (m 
*MockCommandExecutor) CommandContext(ctx context.Context, name string, args ...string) Command { + m.LastCommand = name + m.LastArgs = args + return &MockCommand{ + executor: m, + output: m.Output, + err: m.Error, + } +} + +func (m *MockCommandExecutor) GetFullCommand() string { + if m.LastCommand == "" { + return "" + } + return m.LastCommand + " " + strings.Join(m.LastArgs, " ") +} + +// MockCommand implements Command interface for testing +type MockCommand struct { + executor *MockCommandExecutor + output []byte + err error + dir string +} + +func (m *MockCommand) SetDir(dir string) { + m.dir = dir +} + +func (m *MockCommand) CombinedOutput() ([]byte, error) { + return m.output, m.err +} + +func (m *MockCommand) Run() error { + if m.executor.BehaviorOnRun != nil { + return m.executor.BehaviorOnRun() + } + return m.err +} + +func (m *MockCommand) GetPath() string { + return m.executor.LastCommand +} + +func (m *MockCommand) GetArgs() []string { + // Return the full args array (including the command name as args[0]) + if m.executor.LastCommand == "" { + return []string{} + } + return append([]string{m.executor.LastCommand}, m.executor.LastArgs...) +} + +func (m *MockCommandExecutor) FileExists(path string) bool { + if m.FileExistsMap != nil { + exists, found := m.FileExistsMap[path] + if found { + return exists + } + } + // Default to true if not specified + return true +} \ No newline at end of file diff --git a/chartvalidator/checker/go.mod b/chartvalidator/checker/go.mod new file mode 100644 index 0000000..4e30820 --- /dev/null +++ b/chartvalidator/checker/go.mod @@ -0,0 +1,13 @@ +module github.com/interledger/interledger-app-deploy/ci + +go 1.24.5 + +require ( + github.com/stretchr/testify v1.11.1 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect +) diff --git a/chartvalidator/checker/go.sum b/chartvalidator/checker/go.sum new file mode 100644 index 0000000..c4c1710 --- /dev/null +++ b/chartvalidator/checker/go.sum @@ -0,0 +1,10 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/chartvalidator/checker/main.go b/chartvalidator/checker/main.go new file mode 100644 index 0000000..9c6f361 --- /dev/null +++ b/chartvalidator/checker/main.go @@ -0,0 +1,130 @@ +package main + +import ( + "context" + "flag" + "fmt" + "os" +) + +var srcPrefix string = "../" +var verboseLogging bool = false + +func main() { + if len(os.Args) < 2 { + printUsage() + os.Exit(1) + } + + command := os.Args[1] + args := os.Args[2:] + + switch command { + case "run-checks": + runChartChecksCommand(args) + case "help", "-h", "--help": + printUsage() + default: + fmt.Fprintf(os.Stderr, 
"Unknown command: %s\n", command) + printUsage() + os.Exit(1) + } +} + +func printUsage() { + fmt.Println("Usage: run-manifest-checks [flags]") + fmt.Println("") + fmt.Println("Commands:") + fmt.Println(" run-checks Runs all available checks on the charts for given environment.") + fmt.Println(" help Displays this help message.") + fmt.Println("") + fmt.Println("Use 'run-manifest-checks -h' to see command-specific flags.") +} + + + +func runChartChecksCommand(args []string) { + fs := flag.NewFlagSet("run-checks", flag.ExitOnError) + + var ( + singleEnv = fs.String("env", "", "Only process this environment (folder name under -envdir).") + envDir = fs.String("envdir", "../env", "Base directory containing environment folders.") + outputDir = fs.String("output", "manifests", "Output directory for rendered charts.") + verbose = fs.Bool("v", false, "Enable verbose logging.") + ) + + fs.Usage = func() { + fmt.Println("Usage: run-manifest-checks run-checks [flags]") + fmt.Println("") + fmt.Println("Will run a series of checks against all charts found in the ApplicationSets in the specified environment.") + fmt.Println("Steps are as follows:") + fmt.Println(" 1. Find all charts referenced in ApplicationSets in the specified environment.") + fmt.Println(" 2. Render each chart with its values using Helm.") + fmt.Println(" 3. Validate the rendered manifests using kubeconform.") + fmt.Println(" 4. Extract Docker image references from the manifests.") + fmt.Println(" 5. Validate that each Docker image exists in the registry.") + fmt.Println("") + fmt.Println("Docker needs to be authenticated to the registries used by the charts for image validation to work.") + fmt.Println("") + fs.PrintDefaults() + } + + if err := fs.Parse(args); err != nil { + os.Exit(1) + } + + verboseLogging = *verbose + + if err := runAllChartChecks(*singleEnv, *envDir, *outputDir); err != nil { + fmt.Fprintf(os.Stderr, "Error running chart checks: %v\n", err) + os.Exit(1) + } + +} + + +func runAllChartChecks(singleEnv, envDir, outputDir string) error { + fmt.Println("Starting chart checks...") + params, err := findChartsInAppsets(envDir, singleEnv) + if err != nil { + return fmt.Errorf("failed to find charts in ApplicationSets: %w", err) + } + + fmt.Printf("Found %d charts to process.\n", len(params)) + + context := context.Background() + + // Delete output dir if it exists + if err := os.RemoveAll(outputDir); err != nil { + return fmt.Errorf("failed to clear output directory: %w", err) + } + + appChecker := NewAppCheckerEngine(context, outputDir) + appChecker.Start(10) + + go func() { + for _, p := range params { + appChecker.inputChan <- AppCheckInstruction{Chart: p} + } + close(appChecker.inputChan) + }() + + success := true + + for result := range appChecker.resultChan { + if result.Error != nil { + fmt.Printf(">>> chart %s %s from env %s with image %s: ✗ Error: %v\n", result.Chart.ChartName, result.Chart.ChartVersion, result.Chart.Env, result.Image, result.Error) + success = false + } else { + fmt.Printf(">>> chart %s %s from env %s with image %s: ✓ All checks passed\n", result.Chart.ChartName, result.Chart.ChartVersion, result.Chart.Env, result.Image) + } + } + + if success { + fmt.Println("All chart checks completed successfully.") + return nil + } else { + fmt.Println("Some chart checks failed. 
See above for details.") + return fmt.Errorf("one or more chart checks failed") + } +} \ No newline at end of file diff --git a/chartvalidator/checker/test_data/configmap.yaml b/chartvalidator/checker/test_data/configmap.yaml new file mode 100644 index 0000000..5433155 --- /dev/null +++ b/chartvalidator/checker/test_data/configmap.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +data: + BACKEND_GRPC_URL: wallet-backend-service-grpc:8448 +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/instance: wallet-admin + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: wallet-admin-config + app.kubernetes.io/version: v1.2.16 + helm.sh/chart: ilf-wallet-admin-2.1.1 + name: wallet-admin-config \ No newline at end of file diff --git a/chartvalidator/checker/test_data/deployment.yaml b/chartvalidator/checker/test_data/deployment.yaml new file mode 100644 index 0000000..6ba65e5 --- /dev/null +++ b/chartvalidator/checker/test_data/deployment.yaml @@ -0,0 +1,67 @@ +# Source: ilf-wallet-admin/templates/deployment.server.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/instance: wallet-admin + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: wallet-admin-server + app.kubernetes.io/version: v1.2.16 + helm.sh/chart: ilf-wallet-admin-2.1.1 + name: wallet-admin-server +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/instance: wallet-admin + app.kubernetes.io/name: wallet-admin-server + template: + metadata: + annotations: + ilf-configmap-worker-sha256: f980bf57425d2aa2920a40909aa1a3a8e639c3fa332b0c92292737793bbbbcc8 + labels: + app.kubernetes.io/instance: wallet-admin + app.kubernetes.io/name: wallet-admin-server + spec: + automountServiceAccountToken: true + containers: + - envFrom: + - configMapRef: + name: wallet-admin-config + image: europe-west4-docker.pkg.dev/wallet-dev-462809/interledger-app/botanist:v1.3.2-canary + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 3000 + initialDelaySeconds: 10 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + name: server + ports: + - containerPort: 3000 + name: http + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 3000 + initialDelaySeconds: 10 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 1000m + memory: 1024Mi + requests: + cpu: 50m + memory: 128Mi + securityContext: null + securityContext: null + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule diff --git a/chartvalidator/checker/test_data/example.yaml b/chartvalidator/checker/test_data/example.yaml new file mode 100644 index 0000000..1bdba78 --- /dev/null +++ b/chartvalidator/checker/test_data/example.yaml @@ -0,0 +1,123 @@ +--- +# Source: ilf-wallet-admin/templates/pdb.server.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + labels: + app.kubernetes.io/instance: wallet-admin + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: wallet-admin-server + app.kubernetes.io/version: v1.2.16 + helm.sh/chart: ilf-wallet-admin-2.1.1 + name: wallet-admin-server +spec: + minAvailable: 1 + selector: + matchLabels: + app.kubernetes.io/instance: wallet-admin + app.kubernetes.io/name: wallet-admin-server +--- +# Source: ilf-wallet-admin/templates/configMap.yaml +apiVersion: v1 +data: + BACKEND_GRPC_URL: wallet-backend-service-grpc:8448 +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/instance: wallet-admin + 
app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: wallet-admin-config + app.kubernetes.io/version: v1.2.16 + helm.sh/chart: ilf-wallet-admin-2.1.1 + name: wallet-admin-config +--- +# Source: ilf-wallet-admin/templates/service.http.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: null + labels: + app.kubernetes.io/instance: wallet-admin + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: wallet-admin-service + app.kubernetes.io/version: v1.2.16 + helm.sh/chart: ilf-wallet-admin-2.1.1 + name: wallet-admin-service +spec: + ports: + - name: http + port: 3000 + protocol: TCP + targetPort: 3000 + selector: + app.kubernetes.io/instance: wallet-admin + app.kubernetes.io/name: wallet-admin-server + type: ClusterIP +--- +# Source: ilf-wallet-admin/templates/deployment.server.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/instance: wallet-admin + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: wallet-admin-server + app.kubernetes.io/version: v1.2.16 + helm.sh/chart: ilf-wallet-admin-2.1.1 + name: wallet-admin-server +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/instance: wallet-admin + app.kubernetes.io/name: wallet-admin-server + template: + metadata: + annotations: + ilf-configmap-worker-sha256: f980bf57425d2aa2920a40909aa1a3a8e639c3fa332b0c92292737793bbbbcc8 + labels: + app.kubernetes.io/instance: wallet-admin + app.kubernetes.io/name: wallet-admin-server + spec: + automountServiceAccountToken: true + containers: + - envFrom: + - configMapRef: + name: wallet-admin-config + image: europe-west4-docker.pkg.dev/wallet-dev-462809/interledger-app/botanist:v1.3.2-canary + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 3000 + initialDelaySeconds: 10 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + name: server + ports: + - containerPort: 3000 + name: http + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 3000 + initialDelaySeconds: 10 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 1000m + memory: 1024Mi + requests: + cpu: 50m + memory: 128Mi + securityContext: null + securityContext: null + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule diff --git a/chartvalidator/checker/test_data/service.yaml b/chartvalidator/checker/test_data/service.yaml new file mode 100644 index 0000000..914dbf5 --- /dev/null +++ b/chartvalidator/checker/test_data/service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: null + labels: + app.kubernetes.io/instance: wallet-admin + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: wallet-admin-service + app.kubernetes.io/version: v1.2.16 + helm.sh/chart: ilf-wallet-admin-2.1.1 + name: wallet-admin-service +spec: + ports: + - name: http + port: 3000 + protocol: TCP + targetPort: 3000 + selector: + app.kubernetes.io/instance: wallet-admin + app.kubernetes.io/name: wallet-admin-server + type: ClusterIP \ No newline at end of file diff --git a/chartvalidator/checker/types.go b/chartvalidator/checker/types.go new file mode 100644 index 0000000..7bea375 --- /dev/null +++ b/chartvalidator/checker/types.go @@ -0,0 +1,72 @@ +package main + +import ( + "os/exec" + "sync" +) + +type ErrorResult struct { + Chart ChartRenderParams + Error error +} + +type DockerImageValidationResult struct { + Chart ChartRenderParams + Image string + Exists bool + 
Error error +} + +type ImageExtractionResult struct { + Chart ChartRenderParams + ManifestFile string + Image string +} + +// ChartRenderParams represents a Helm chart configuration extracted from ApplicationSet files +type ChartRenderParams struct { + Env string `json:"env"` + ChartName string `json:"chartName"` + RepoURL string `json:"repoURL"` + ChartVersion string `json:"chartVersion"` + BaseValuesFile string `json:"baseValuesFile"` + ValuesOverride string `json:"valuesOverride"` +} + +// task represents a validation task with a chart and command +type task struct { + Chart ChartRenderParams + Cmd *exec.Cmd +} + +// imageCheck represents the result of checking if a Docker image exists +type imageCheck struct { + Chart ChartRenderParams + Image string + Present bool + Error error +} + +// validationResult represents the result of a kubeconform validation +type validationResult struct { + Chart ChartRenderParams + RC int + Out string + Err string +} + +// validationFailure represents a failed validation with chart and details +type validationFailure struct { + Chart ChartRenderParams + RC int + Output string +} + +// imageCheckSetup manages image checking infrastructure +type imageCheckSetup struct { + inputPipe chan *imageCheck + resultPipe chan *imageCheck + results map[string]*imageCheck + workerWg sync.WaitGroup + resultsWg sync.WaitGroup +} \ No newline at end of file diff --git a/chartvalidator/checker/utils.go b/chartvalidator/checker/utils.go new file mode 100644 index 0000000..af65904 --- /dev/null +++ b/chartvalidator/checker/utils.go @@ -0,0 +1,131 @@ +package main + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" +) + +// ANSI color codes +const ( + colorReset = "\033[0m" + colorRed = "\033[31m" + colorYellow = "\033[33m" + colorCyan = "\033[36m" +) + +// logEngine prints formatted log messages with color coding based on level +func logEngine(level, engineName string, workerId int, message string) { + var color string + switch strings.ToUpper(level) { + case "ERROR": + color = colorRed + case "WARNING": + color = colorYellow + case "DEBUG": + color = colorCyan + default: + color = colorReset + } + + // Split message into lines if it contains newlines + lines := strings.Split(message, "\n") + + // Print first line with full prefix and color + fmt.Printf("%s[%s]\t[%s Worker %d]\t%s%s\n", color, level, engineName, workerId, lines[0], colorReset) + + // Print additional lines with empty columns for alignment + for i := 1; i < len(lines); i++ { + fmt.Printf("\t\t%s\n", lines[i]) + } +} + +func logEngineDebug(engineName string, workerId int, message string) { + if !verboseLogging { + return + } + logEngine("DEBUG", engineName, workerId, message) +} + +func logEngineWarning(engineName string, workerId int, message string) { + logEngine("WARNING", engineName, workerId, message) +} + +func logEngineError(engineName string, workerId int, message string) { + logEngine("ERROR", engineName, workerId, message) +} + +// getJobCount returns the number of parallel jobs to run +func getJobCount() int { + if s := os.Getenv("KUBECONFORM_JOBS"); strings.TrimSpace(s) != "" { + if n, err := parseInt(s); err == nil && n > 0 { + return n + } + } + n := runtime.NumCPU() + if n <= 0 { + n = 4 + } + return n +} + +// parseInt parses a string to integer, returning error if invalid +func parseInt(s string) (int, error) { + var n int + _, err := fmt.Sscanf(strings.TrimSpace(s), "%d", &n) + return n, err +} + +// recreateOutputDir removes and recreates the output directory +func 
recreateOutputDir(outputDir string) error { + if err := os.RemoveAll(outputDir); err != nil { + return fmt.Errorf("failed to remove output directory: %w", err) + } + + if err := os.MkdirAll(outputDir, 0755); err != nil { + return fmt.Errorf("failed to create output directory: %w", err) + } + + return nil +} + +// walkFiles returns all files under root that pass the filter +func walkFiles(root string, filter func(string, fs.DirEntry) bool) ([]string, error) { + var files []string + err := filepath.WalkDir(root, func(p string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() && filter(p, d) { + files = append(files, p) + } + return nil + }) + return files, err +} + +// removeDuplicates removes duplicate strings from a slice while preserving order +func removeDuplicates(slice []string) []string { + seen := make(map[string]bool) + var result []string + + for _, item := range slice { + if !seen[item] { + seen[item] = true + result = append(result, item) + } + } + + return result +} + +// findYAMLFiles discovers all YAML files in a directory recursively +func findYAMLFiles(dir string) ([]string, error) { + return walkFiles(dir, func(path string, d fs.DirEntry) bool { + name := strings.ToLower(d.Name()) + return strings.HasSuffix(name, ".yaml") || strings.HasSuffix(name, ".yml") + }) +} \ No newline at end of file diff --git a/chartvalidator/checker/utils_test.go b/chartvalidator/checker/utils_test.go new file mode 100644 index 0000000..ab403c0 --- /dev/null +++ b/chartvalidator/checker/utils_test.go @@ -0,0 +1,161 @@ +package main + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +// Helper function to create a mock executor with default settings +func createMockExecutor() *MockCommandExecutor { + return &MockCommandExecutor{ + Output: []byte("mocked helm output"), + Error: nil, + } +} + +// Helper function to create a mock executor with custom behavior +func createMockExecutorWithBehavior(behaviorFunc func() error) *MockCommandExecutor { + mockExecutor := createMockExecutor() + mockExecutor.BehaviorOnRun = behaviorFunc + return mockExecutor +} + +// Helper function to create a context for tests +func createTestContext() context.Context { + return context.Background() +} + +// Common validation helper for string slices +func assertStringSlicesEqual(t *testing.T, expected, actual []string, message string) { + assert.Equal(t, len(expected), len(actual), "Length mismatch: %s", message) + for i, expectedVal := range expected { + if i < len(actual) { + assert.Equal(t, expectedVal, actual[i], "Mismatch at index %d: %s", i, message) + } + } +} + +// Helper to assert command execution +func assertCommandExecution(t *testing.T, mockExecutor *MockCommandExecutor, expectedCommand string) { + actualCommand := mockExecutor.GetFullCommand() + assert.Equal(t, expectedCommand, actualCommand) +} + +// Helper to create a default test chart for chart rendering tests +func createTestChart() ChartRenderParams { + return ChartRenderParams{ + Env: "development", + ChartName: "test-chart", + RepoURL: "https://example.com/charts", + BaseValuesFile: "values.yaml", + ValuesOverride: "override.yaml", + ChartVersion: "1.0.0", + } +} + +// Helper function to assert chart fields match +func assertChartFieldsMatch(t *testing.T, expected, actual ChartRenderParams) { + assert.Equal(t, expected.ChartName, actual.ChartName) + assert.Equal(t, expected.RepoURL, actual.RepoURL) + assert.Equal(t, expected.BaseValuesFile, actual.BaseValuesFile) 
+ assert.Equal(t, expected.ValuesOverride, actual.ValuesOverride) + assert.Equal(t, expected.ChartVersion, actual.ChartVersion) +} + +// Helper function to create an image extraction engine +func createImageExtractionEngine() *ImageExtractionEngine { + return &ImageExtractionEngine{ + inputChan: make(chan ManifestValidationResult), + outputChan: make(chan ImageExtractionResult), + context: createTestContext(), + } +} + +// Helper function to create a temp manifest file +func createTempManifestFile(t *testing.T, tempDir, filename, content string) string { + manifestPath := filepath.Join(tempDir, filename) + err := os.MkdirAll(filepath.Dir(manifestPath), 0755) + if err != nil { + t.Fatalf("Failed to create directory: %v", err) + } + err = os.WriteFile(manifestPath, []byte(content), 0644) + if err != nil { + t.Fatalf("Failed to write temp manifest file: %v", err) + } + return manifestPath +} + +// Helper function to collect image extraction results +func collectImageExtractionResults(engine *ImageExtractionEngine) []ImageExtractionResult { + results := make([]ImageExtractionResult, 0) + for extractionResult := range engine.outputChan { + results = append(results, extractionResult) + } + return results +} + +// Helper function to extract image names from results +func extractImageNames(results []ImageExtractionResult) []string { + var images []string + for _, result := range results { + images = append(images, result.Image) + } + return images +} + +// Helper function to assert image set matches +func assertImageSetMatches(t *testing.T, expected map[string]bool, actual []string, testName string) { + if len(actual) != len(expected) { + t.Errorf("Expected %d images, got %d for %s", len(expected), len(actual), testName) + } + for _, img := range actual { + if !expected[img] { + t.Errorf("Unexpected image %s found for %s", img, testName) + } + } +} + +// Helper function to assert string slices match exactly +func assertStringSlicesMatch(t *testing.T, expected, actual []string, testName string) { + if len(actual) != len(expected) { + t.Errorf("Expected %d items, got %d for %s", len(expected), len(actual), testName) + return + } + for i, expectedItem := range expected { + if i < len(actual) && actual[i] != expectedItem { + t.Errorf("Expected item %s at index %d, got %s for %s", expectedItem, i, actual[i], testName) + } + } +} + +// Helper function to create a manifest validation engine +func createManifestValidationEngine(mockExecutor *MockCommandExecutor) *ManifestValidationEngine { + return &ManifestValidationEngine{ + inputChan: make(chan RenderResult), + resultChan: make(chan ManifestValidationResult), + context: createTestContext(), + executor: mockExecutor, + errorChan: make(chan ErrorResult), + } +} + +// Helper function to send render result to manifest validation engine +func sendRenderResultToEngine(engine *ManifestValidationEngine, manifestPath string) { + go func() { + engine.inputChan <- RenderResult{ + ManifestPath: manifestPath, + } + }() +} + +// Helper function to create default mock executor for manifest validation +func createManifestValidationMockExecutor() *MockCommandExecutor { + return &MockCommandExecutor{ + Output: []byte("mocked kubeconform output"), + Error: nil, + } +} \ No newline at end of file From 8eba2cdbc4873d04dfb282277cb814b5dc03839b Mon Sep 17 00:00:00 2001 From: Stephan Butler Date: Fri, 24 Oct 2025 12:30:32 +0200 Subject: [PATCH 2/4] fix: tweaking --- chartvalidator/.gitignore | 1 + chartvalidator/Dockerfile | 4 ++++ 2 files changed, 5 insertions(+) create mode 
100644 chartvalidator/.gitignore

diff --git a/chartvalidator/.gitignore b/chartvalidator/.gitignore
new file mode 100644
index 0000000..1c2f433
--- /dev/null
+++ b/chartvalidator/.gitignore
@@ -0,0 +1 @@
+tmp
\ No newline at end of file
diff --git a/chartvalidator/Dockerfile b/chartvalidator/Dockerfile
index 80756cd..92919fc 100644
--- a/chartvalidator/Dockerfile
+++ b/chartvalidator/Dockerfile
@@ -22,6 +22,10 @@ RUN ./get_helm.sh
 # Install kustomize
 RUN curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" -o install_kustomize.sh
 
+# Install kubeconform from GitHub releases
+RUN curl -sSL "https://github.com/yannh/kubeconform/releases/latest/download/kubeconform-linux-amd64" -o /usr/local/bin/kubeconform \
+    && chmod +x /usr/local/bin/kubeconform
+
 RUN chmod +x install_kustomize.sh
 RUN ./install_kustomize.sh /usr/local/bin
 

From 05126b6ac8654305663d652c62a3f8f02fc88dc0 Mon Sep 17 00:00:00 2001
From: Stephan Butler 
Date: Fri, 24 Oct 2025 12:33:39 +0200
Subject: [PATCH 3/4] fix: kubeconform will now be included

---
 chartvalidator/Dockerfile | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/chartvalidator/Dockerfile b/chartvalidator/Dockerfile
index 92919fc..b088a45 100644
--- a/chartvalidator/Dockerfile
+++ b/chartvalidator/Dockerfile
@@ -23,8 +23,9 @@ RUN ./get_helm.sh
 RUN curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" -o install_kustomize.sh
 
 # Install kubeconform from GitHub releases
-RUN curl -sSL "https://github.com/yannh/kubeconform/releases/latest/download/kubeconform-linux-amd64" -o /usr/local/bin/kubeconform \
-    && chmod +x /usr/local/bin/kubeconform
+RUN curl -sSL "https://github.com/yannh/kubeconform/releases/download/v0.7.0/kubeconform-linux-amd64.tar.gz" -o /usr/local/bin/kubeconform
+RUN tar -xzf /usr/local/bin/kubeconform -C /usr/local/bin kubeconform-linux-amd64
+RUN chmod +x /usr/local/bin/kubeconform
 
 RUN chmod +x install_kustomize.sh
 RUN ./install_kustomize.sh /usr/local/bin

From b54408e33832089a86fca35f8a37a7efa3d70506 Mon Sep 17 00:00:00 2001
From: Stephan Butler 
Date: Fri, 24 Oct 2025 12:41:21 +0200
Subject: [PATCH 4/4] fix: fixed kubeconform extraction

---
 chartvalidator/Dockerfile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/chartvalidator/Dockerfile b/chartvalidator/Dockerfile
index b088a45..1889932 100644
--- a/chartvalidator/Dockerfile
+++ b/chartvalidator/Dockerfile
@@ -23,8 +23,8 @@ RUN ./get_helm.sh
 RUN curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" -o install_kustomize.sh
 
 # Install kubeconform from GitHub releases
-RUN curl -sSL "https://github.com/yannh/kubeconform/releases/download/v0.7.0/kubeconform-linux-amd64.tar.gz" -o /usr/local/bin/kubeconform
-RUN tar -xzf /usr/local/bin/kubeconform -C /usr/local/bin kubeconform-linux-amd64
+RUN curl -sSL "https://github.com/yannh/kubeconform/releases/download/v0.7.0/kubeconform-linux-amd64.tar.gz" -o /root/kubeconform.tar.gz
+RUN tar -xzf /root/kubeconform.tar.gz -C /usr/local/bin kubeconform
 RUN chmod +x /usr/local/bin/kubeconform
 
 RUN chmod +x install_kustomize.sh