diff --git a/.github/workflows/build-test-release.yaml b/.github/workflows/build-test-release.yaml new file mode 100644 index 00000000..e099709b --- /dev/null +++ b/.github/workflows/build-test-release.yaml @@ -0,0 +1,32 @@ +name: build-test-release + +on: + push: + branches: + - main + +jobs: + build-test-release: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Build all targets. + run: | + make build-all + - name: Run unit tests across all targets. + run: | + make test-all + - name: Prepare scenarios to be released. + run: | + sudo apt install zip + zip -r scenarios.zip scenarios + - name: Release Innovation Engine + uses: "marvinpinto/action-automatic-releases@latest" + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + title: "IE" + automatic_release_tag: "latest" + prerelease: true + files: | + ./bin/ie + ./scenarios.zip diff --git a/.github/github-actions-demo.yml b/.github/workflows/github-actions-demo.yml~ similarity index 100% rename from .github/github-actions-demo.yml rename to .github/workflows/github-actions-demo.yml~ diff --git a/.github/workflows/helloWorld.yml b/.github/workflows/helloWorld.yml new file mode 100644 index 00000000..6e737461 --- /dev/null +++ b/.github/workflows/helloWorld.yml @@ -0,0 +1,36 @@ +# This is a basic workflow to help you get started with Actions + +name: CI + +# Controls when the workflow will run +on: + # Triggers the workflow on push or pull request events but only for the main branch + push: + branches: [ main ] + pull_request: + branches: [ main ] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + # This workflow contains a single job called "build" + build: + # The type of runner that the job will run on + runs-on: ubuntu-latest + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@v3 + + # Runs a single command using the runners shell + - name: Run a one-line script + run: echo Hello, world! + + # Runs a set of commands using the runners shell + - name: Run a multi-line script + run: | + echo Add other actions to build, + echo test, and deploy your project. diff --git a/.github/workflows/scenario-testing.yaml b/.github/workflows/scenario-testing.yaml new file mode 100644 index 00000000..290de5be --- /dev/null +++ b/.github/workflows/scenario-testing.yaml @@ -0,0 +1,31 @@ +name: scenario-testing +on: + schedule: + - cron: "0 */2 * * *" + push: + branches: + - main + pull_request: + branches: + - main +jobs: + test-ocd-scenarios: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Build all targets. + run: | + make build-all + make test-all + - name: Sign into Azure + uses: azure/actions/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Run all one click deployment scenarios. + uses: azure/CLI@v1 + with: + inlineScript: | + make test-scenarios SUBSCRIPTION=${{ secrets.AZURE_SUBSCRIPTION }} + - name: Display ie.log file + run: | + cat ie.log diff --git a/.gitignore b/.gitignore index f383d4e8..c76f479a 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,11 @@ -*/*~ \ No newline at end of file +# Python +__pycache__ + +#VS Code +.vscode + +# Ignore all binaries. 
+bin/ + +# Ignore ie logs +ie.log diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..16811904 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,180 @@ + +# Contributing to InnovationEngine + +First off, thanks for taking the time to contribute! ❤️ + +All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles contributions. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 + +> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: > - Star the project > - Tweet about it > - Refer this project in your project's readme > - Mention the project at local meetups and tell your friends/colleagues + + +## Table of Contents + +- [Code of Conduct](#code-of-conduct) +- [Microsoft Open Source Contribution Guide](#microsoft-open-source-contribution-guide) +- [I Have a Question](#i-have-a-question) +- [I Want To Contribute](#i-want-to-contribute) +- [Reporting Bugs](#reporting-bugs) +- [Suggesting Enhancements](#suggesting-enhancements) +- [Your First Code Contribution](#your-first-code-contribution) + +- [Styleguides](#styleguides) + + +## Code of Conduct + +This project and everyone participating in it is governed by the +[InnovationEngine Code of Conduct](https://github.com/Azure/InnovationEngine/blob/main/CODE_OF_CONDUCT.md). +By participating, you are expected to uphold this code. Please report unacceptable behavior +to mbifeld@microsoft.com. + +## Microsoft Open Source Contribution Guide + +This is a Microsoft Open Source project. Please refer to the [Microsoft Open Source Contribution Guide](https://docs.opensource.microsoft.com/contributing/) for FAQs and general information on contributing to Microsoft Open Source. + +## I Have a Question + + +Before you ask a question, it is best to search for existing [Issues](https://github.com/Azure/InnovationEngine/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. + +If you then still feel the need to ask a question and need clarification, we recommend the following: + +- Open an [Issue](https://github.com/Azure/InnovationEngine/issues/new). +- Provide as much context as you can about what you're running into. +- Provide project and platform versions (golang version, operating system, etc), depending on what seems relevant. + +We will then address the issue as soon as possible. + +## I Want To Contribute + +> ### Legal Notice > When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. + +### Reporting Bugs + + +#### Before Submitting a Bug Report + +A good bug report shouldn't leave others needing to chase you down for more information. Therefore, we ask you to investigate carefully, collect information, and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. + +- Make sure that you are using the latest version.
+- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](./README.md). If you are looking for support, you might want to check [I Have A Question](#i-have-a-question)). +- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in [Issues](https://github.com/Azure/InnovationEngine/issues). +- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. +- Collect information about the bug: +- Stack trace (Traceback) +- OS, Platform and Version (Windows, Linux, macOS, x86, ARM, etc) +- Version of the golang, make, etc depending on what seems relevant. +- Possibly your input and the output +- Can you reliably reproduce the issue? And can you also reproduce it with older versions? + + +#### How Do I Submit a Good Bug Report? + +> You must never report security related issues, vulnerabilities, or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead, sensitive bugs must be sent by email to mbifeld@microsoft.com. + + +We use GitHub issues to track bugs and errors. If you run into an issue with the project: + +- Open an [Issue](https://github.com/Azure/InnovationEngine/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) +- Explain the behavior you would expect and the actual behavior. +- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. +- Provide the information you collected in the previous section. + +Once it's filed: + +- The project team will label the issue accordingly. +- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. +- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be [implemented by someone](#your-first-code-contribution). + +### Suggesting Enhancements + +This section guides you through submitting an enhancement suggestion for InnovationEngine, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. + + +#### Before Submitting an Enhancement + +- Make sure that you are using the latest version. +- Read the [documentation](./README.md) carefully and find out if the functionality is already covered, maybe by an individual configuration. +- Perform a [search](https://github.com/Azure/InnovationEngine/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. +- Find out whether your idea fits within the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. 
Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. + + +#### How Do I Submit a Good Enhancement Suggestion? + +Enhancement suggestions are tracked as [GitHub Issues](https://github.com/Azure/InnovationEngine/issues). + +- Use a **clear and descriptive title** for the issue to identify the suggestion. +- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. +- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. +- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. +- **Explain why this enhancement would be useful** to most InnovationEngine users. You may also want to point out the other projects that solved it better and which could serve as inspiration. + +### Your First Code Contribution +#### Innovation Engine +To get started with developing features for the Innovation Engine itself, you +will need `make` & `go`. Once you have those installed and the project cloned +to a local repository, you can attempt to build the project using: + +```bash +make build-all +``` + +If the build completes, you should be able to start adding features/fixes +to the Innovation Engine codebase. Once you've added new changes, you can test +for regressions using: + +```bash +make test-all +``` + +If implementing a new feature, it is expected to add & update any necessary +tests for the changes introduced by the feature. + +If you're still looking for more information about how to build & run Innovation Engine, +[README](./README.md) has a more comprehensive guide for how to get started with project +development. + +#### Innovation Engine markdown scenarios + +If you are contributing to one of the markdown scenarios (executable documents) +for Innovation Engine, you are expected to follow the installation steps before +updating/adding your document. This is needed because once you've made changes +or have added a new scenario, you should test your executable document by +using the Innovation Engine: + +```bash +ie execute +``` + +This will attempt to parse your document into an executable scenario, make sure +that the commands extracted from codeblocks execute successfully, and that +their corresponding result blocks (if any) also line up with what the command +returned. Once you get your scenario to execute successfully, you should go ahead +and make a PR for it! + +#### Creating a PR + + +When creating a PR, please include as much context as possible. At minimum, this should include what the PR does and the testing strategies for it. + +If your PR is a work in progress, please label it as a draft and include 'WIP' at the beginning of the PR title. + + + +## Styleguides +For working on the Innovation Engine, `go fmt` is what is used to format the +code for the project. + +The commit style for individual commits doesn't necessarily matter as +all commits from a PR branch will be squashed and merged into the main +branch when PRs are completed. + + +## Attribution +This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! 
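As a quick recap of the workflow described in "Your First Code Contribution", the individual steps can be chained together in one shell session. This is a minimal sketch that assumes `make`, `go`, and a cloned copy of the repository; the scenario path in the last command is only a hypothetical placeholder and should be replaced with the document you actually changed.

```bash
# Build the CLI and supporting binaries (output lands in ./bin).
make build-all

# Run the unit test suite to check for regressions before opening a PR.
make test-all

# Validate an executable document end to end with the freshly built CLI.
# The path below is an illustrative example only; point it at your scenario.
./bin/ie execute scenarios/ocd/my-scenario/README.md
```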
diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..7f58ce47 --- /dev/null +++ b/Makefile @@ -0,0 +1,102 @@ +.PHONY: build-ie build-api build-all run-ie run-api clean test-all test all + +BINARY_DIR := bin +IE_BINARY := $(BINARY_DIR)/ie +API_BINARY := $(BINARY_DIR)/api + +# -------------------------- Native build targets ------------------------------ + +build-ie: + @echo "Building the Innovation Engine CLI..." + @CGO_ENABLED=0 go build -o "$(IE_BINARY)" cmd/ie/ie.go + +build-api: + @echo "Building the Innovation Engine API..." + @CGO_ENABLED=0 go build -o "$(API_BINARY)" cmd/api/main.go + +build-runner: build-ie build-api + @echo "Building the Innovation Engine Runner..." + @CGO_ENABLED=0 go build -o "$(BINARY_DIR)/runner" cmd/runner/main.go + +build-all: build-ie build-api build-runner + +# ------------------------------ Test targets ---------------------------------- + +test-all: + @echo "Running all tests..." + @go clean -testcache + @go test -v ./... + +SUBSCRIPTION ?= 00000000-0000-0000-0000-000000000000 +SCENARIO ?= ./README.md +test-scenario: + @echo "Running scenario $(SCENARIO)" + $(IE_BINARY) test $(SCENARIO) --subscription $(SUBSCRIPTION) + +test-scenarios: + @echo "Testing out the scenarios" + for dir in ./scenarios/ocd/*/; do \ + $(MAKE) test-scenario SCENARIO="$${dir}README.md" SUBSCRIPTION="$(SUBSCRIPTION)"; \ + done + +# ------------------------------- Run targets ---------------------------------- + +run-ie: build-ie + @echo "Running the Innovation Engine CLI" + @"$(IE_BINARY)" + +run-api: build-api + @echo "Running the Innovation Engine API" + @"$(API_BINARY)" + +clean: + @echo "Cleaning up" + @rm -rf "$(BINARY_DIR)" + +# ----------------------------- Docker targets --------------------------------- + +API_IMAGE_TAG ?= latest + +# Builds the API container. +build-api-container: + @echo "Building the Innovation Engine API container" + @docker build -t innovation-engine-api:$(API_IMAGE_TAG) -f infra/api/Dockerfile . + + +# ----------------------------- Kubernetes targets ----------------------------- + +# Applies the ingress controller to the cluster and waits for it to be ready. +k8s-deploy-ingress-controller: + @echo "Deploying the ingress controller to your local cluster..." + @kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.7.1/deploy/static/provider/cloud/deploy.yaml + @kubectl wait --namespace ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=120s + +# Deploys the API deployment, service, and ingress specifications to the +# cluster, allowing the API to be accessed via the ingress controller. +k8s-deploy-api: build-api-container + @echo "Deploying the Innovation Engine API container to your local cluster..." + @kubectl apply -f infra/api/deployment.yaml + @kubectl apply -f infra/api/service.yaml + @kubectl apply -f infra/api/ingress.yaml + +k8s-initialize-cluster: k8s-deploy-ingress-controller k8s-deploy-api + @echo "Set up Kubernetes cluster for local development." + +k8s-delete-ingress-controller: + @echo "Deleting the ingress controller from your local cluster..." + @kubectl delete -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.7.1/deploy/static/provider/cloud/deploy.yaml + +k8s-delete-api: + @echo "Deleting the Innovation Engine API container from your local cluster..."
+ @kubectl delete -f infra/api/deployment.yaml + @kubectl delete -f infra/api/service.yaml + @kubectl delete -f infra/api/ingress.yaml + +k8s-refresh-api: k8s-delete-api k8s-deploy-api + @echo "Refreshed the Innovation Engine API container in your local cluster..." + +k8s-delete-cluster: k8s-delete-api k8s-delete-ingress-controller + @echo "Deleted Kubernetes cluster for local development." + +k8s-refresh-cluster: k8s-delete-cluster k8s-initialize-cluster + @echo "Refreshed Kubernetes cluster for local development." diff --git a/README.md b/README.md index 5cd7cecf..b9a5d9a0 100644 --- a/README.md +++ b/README.md @@ -1,33 +1,217 @@ -# Project +# Overview -> This repo has been populated by an initial template to help get you started. Please -> make sure to update the content to build a great experience for community-building. +Innovation Engine is a tool for rapid innovation and simplification. -As the maintainer of this project, please make a few updates: +# Executable Documentation +Executable documentation takes standard markdown language and amplifies it by +allowing it to be executed step by step in an educational manner, and tested +via automated CI/CD pipelines. -- Improving this README.MD file to provide a great experience -- Updating SUPPORT.MD with content about this project's support experience -- Understanding the security reporting process in SECURITY.MD -- Remove this section from the README +# Try Out Executable Documentation +Azure Cloud Shell provides an environment with all of the prerequisites +installed to run Executable Documentation. This is the recommended method for +new users to try and develop tutorials for Innovation Engine. + +Open [Azure Cloud Shell](https://ms.portal.azure.com/#cloudshell/) and select +Bash as the environment. Paste the following commands into the shell, this will +clone the Innovation Engine repo, install the requirements, and build out the +innovation engine executable. + +```bash +git clone https://github.com/Azure/InnovationEngine; +cd InnovationEngine; +make build-ie; +``` + +Now you can run the Innovation Engine tutorial with the following +command: + +```bash +./bin/ie execute tutorial.md +``` + +The general format to run an executable document is: +`ie ` + +### Modes of Operation +Today, executable documentation can be run in 3 modes of operation: + +Interactive: Displays the descriptive text of the tutorial and pauses at code +blocks and headings to allow user interaction +`ie interactive tutorial.md` + +Test: Runs the commands and then verifies that the output is sufficiently +similar to the expected results (recorded in the markdown file) to be +considered correct. `ie test tutorial.md` + +Execute: Reads the document and executes all of the code blocks not pausing for +input or testing output. Essentially executes a markdown file as a script. +`ie execute tutorial.md` + +## Use Executable documentation for Automated Testing +One of the core benefits of executable documentation is the ability to run +automated testing on markdown file. This can be used to ensure freshness of +content. + +In order to do this one will need to combine innovation engine executable +documentation syntax with GitHub actions. + +In order to test if a command or action ran correctly executable documentation +needs something to compare the results against. This requirement is met with +result blocks. + +### Result Blocks +Result blocks are distinguished in Executable documentation by a custom +expected_similarity comment tag followed by a code block. 
For example + + +```text +Hello world +``` +This example purposely breaks the comment syntax so that it shows up in +markdown. Otherwise, the tag of expected_similarity is completely invisible. + +The expected similarity value is a floating point number between 0 and 1 which +specifies how closely the output needs to match the results block. 0 being no +similarity, 1 being an exact match. + +>**Note** It may take a little bit of trial and error to find the exact value for expected_similarity. + +### Environment Variables + +Another barrier to automated testing is setting default values for test cases +to use when running. This problem can be solved with command line variables in +executable documentation syntax. + +Default environment variables can be set for executable documentation in a few +different ways. + +1. A matching .ini file to the markdown + - Upon running any document executable documentation will look for a + corresponding .ini file. For example, if my markdown file is named tutorial.md, + the corresponding ini file would be tutorial.ini. + - This file is a simple key value match for environment variable and value. + For example: + ```ini + MY_RESOURCE_GROUP_NAME = myResourceGroup + MY_LOCATION = eastus + MY_VM_NAME = myVM + MY_VM_IMAGE = debian + MY_ADMIN_USERNAME = azureuser + ``` +2. A comment at the beginning of the document containing a code block with the +tag 'variables'. This will be invisible to users unless they look at the raw +markdown. For example: >**Note** The below example intentionally has broken comment syntax w/ two !'s. + + +Variables set in comments will override variables set in a .ini file. +Consequently, locally declared variables in code samples will override +variables set in comments. + +### Setting Up GitHub Actions to use Innovation Engine + +After documentation is set up to take advantage of automated testing, a GitHub +action will need to be created to run testing on a recurring basis. The action +will simply create a basic Linux container, install Innovation Engine +Executable Documentation and run Executable documentation in the Test mode on +whatever markdown files are specified. + +It is important to note that if you require any specific access or CLI tools +not included in standard bash, those will need to be installed in the container. +The following example shows how this may be done for a document which runs Azure +commands. + +```yml +name: 00-testing + +on: + push: + branches: + - main + + workflow_dispatch: + +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Deploy + env: + AZURE_CREDENTIALS: ${{ secrets.AZURE_CREDENTIALS }} + GITHUB_SHA: ${{ github.sha }} + run: | + cd $GITHUB_WORKSPACE/ + git clone https://github.com/Azure/InnovationEngine/tree/ParserAndExecutor + cd innovationEngine + pip3 install -r requirements.txt + cp ../../articles/quick-create-cli.md README.md + python3 main.py test README.md +``` + + +## Use Executable Documentation for Interactive Documentation + +Innovation Engine can also be used for interactive tutorials via a local or +remote shell environment. After cloning the project and running +`make build-ie`, Innovation Engine can be used for +interactive tutorials by simply using the interactive flag when executing the +program. For example, `./bin/ie interactive tutorial.md` + +As it is written the code will pause and wait for input on any header or code +block.
Any document written in standard markdown can be run as an interactive +document. ## Contributing -This project welcomes contributions and suggestions. Most contributions require you to agree to a -Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us -the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. +This is an open source project. Don't keep your code improvements, +features and cool ideas to yourself. Please issue pull requests +against our [GitHub repo](https://github.com/Azure/innovationengine). + +Be sure to use our Git pre-commit script to test your contributions +before committing, simply run the following command: `python3 main.py test test` + +This project welcomes contributions and suggestions. Most +contributions require you to agree to a Contributor License Agreement +(CLA) declaring that you have the right to, and actually do, grant us +the rights to use your contribution. For details, visit +https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine +whether you need to provide a CLA and decorate the PR appropriately +(e.g., label, comment). Simply follow the instructions provided by the +bot. You will only need to do this once across all repos using our +CLA. -When you submit a pull request, a CLA bot will automatically determine whether you need to provide -a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions -provided by the bot. You will only need to do this once across all repos using our CLA. +This project has adopted +the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see +the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with +any additional questions or comments. -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. ## Trademarks -This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft -trademarks or logos is subject to and must follow -[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). -Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. -Any use of third-party trademarks or logos are subject to those third-party's policies. +This project may contain trademarks or logos for projects, products, or +services. Authorized use of Microsoft trademarks or logos is subject to and +must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). +Use of Microsoft trademarks or logos in modified versions of this project must +not cause confusion or imply Microsoft sponsorship. Any use of third-party +trademarks or logos are subject to those third-party's policies. 
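To tie the earlier sections of this README together, here is a minimal sketch of how an author might exercise all three modes of operation against a document that ships a matching `.ini` file. The file names and variable values simply mirror the examples above and are illustrative only.

```bash
# Default values live in a .ini file named after the markdown document.
cat > tutorial.ini <<'EOF'
MY_RESOURCE_GROUP_NAME = myResourceGroup
MY_LOCATION = eastus
EOF

# Walk through the document, pausing at headings and code blocks.
./bin/ie interactive tutorial.md

# Execute every code block and compare output against the recorded result blocks.
./bin/ie test tutorial.md

# Run the document non-interactively, like a script.
./bin/ie execute tutorial.md
```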
diff --git a/cmd/api/main.go b/cmd/api/main.go new file mode 100644 index 00000000..dffdaadb --- /dev/null +++ b/cmd/api/main.go @@ -0,0 +1,55 @@ +package main + +import ( + "net/http" + "path" + + "github.com/Azure/InnovationEngine/internal/kube" + "github.com/google/uuid" + "github.com/labstack/echo/v4" + "github.com/labstack/echo/v4/middleware" +) + +var ( + BASE_ROUTE = "/api" + HEALTH_ROUTE = path.Join(BASE_ROUTE, "health") + EXECUTION_ROUTE = path.Join(BASE_ROUTE, "execute") + DEPLOYMENTS_ROUTE = path.Join(BASE_ROUTE, "deployments") +) + +func main() { + server := echo.New() + + // Setup middleware. + server.Use(middleware.Logger()) + server.Use(middleware.Recover()) + + server.GET(HEALTH_ROUTE, func(c echo.Context) error { + return c.JSON(http.StatusOK, map[string]string{"message": "OK"}) + }) + + server.POST(EXECUTION_ROUTE, func(c echo.Context) error { + clientset, err := kube.GetKubernetesClient() + + id := uuid.New().String() + + // Create deployment + deployment := kube.GetAgentDeployment(id) + _, err = kube.CreateAgentDeployment(clientset, deployment) + + if err != nil { + return c.JSON(http.StatusInternalServerError, map[string]string{"message": err.Error()}) + } + + // Create service + service := kube.GetAgentService(id) + _, err = kube.CreateAgentService(clientset, service) + if err != nil { + return c.JSON(http.StatusInternalServerError, map[string]string{"message": err.Error()}) + } + + return c.JSON(http.StatusOK, map[string]string{"deployment": deployment.Name, "service": service.Name}) + }) + + server.Logger.Fatal(server.Start(":8080")) +} diff --git a/cmd/api/types.go b/cmd/api/types.go new file mode 100644 index 00000000..454727e7 --- /dev/null +++ b/cmd/api/types.go @@ -0,0 +1,11 @@ +package main + +type DeploymentStep struct { + Name string `json:"name"` + Command string `json:"command"` +} + +type DeploymentResponse struct { + AgentWebsocketUrl string `json:"agentWebsocketUrl"` + Steps []DeploymentStep `json:"steps"` +} diff --git a/cmd/ie/commands/execute.go b/cmd/ie/commands/execute.go new file mode 100644 index 00000000..b6075928 --- /dev/null +++ b/cmd/ie/commands/execute.go @@ -0,0 +1,130 @@ +package commands + +import ( + "fmt" + "os" + "strings" + + "github.com/Azure/InnovationEngine/internal/engine" + "github.com/Azure/InnovationEngine/internal/logging" + "github.com/spf13/cobra" +) + +// / Register the command with our command runner. +func init() { + rootCommand.AddCommand(executeCommand) + + // Bool flags + executeCommand.PersistentFlags(). + Bool("verbose", false, "Enable verbose logging & standard output.") + executeCommand.PersistentFlags(). + Bool("do-not-delete", false, "Do not delete the Azure resources created by the Azure CLI commands executed.") + + // String flags + executeCommand.PersistentFlags(). + String("correlation-id", "", "Adds a correlation ID to the user agent used by a scenarios azure-cli commands.") + executeCommand.PersistentFlags(). + String("subscription", "", "Sets the subscription ID used by a scenarios azure-cli commands. Will rely on the default subscription if not set.") + executeCommand.PersistentFlags(). + String("working-directory", ".", "Sets the working directory for innovation engine to operate out of. Restores the current working directory when finished.") + + // StringArray flags + executeCommand.PersistentFlags(). + StringArray("var", []string{}, "Sets an environment variable for the scenario. 
Format: --var =") +} + +var executeCommand = &cobra.Command{ + Use: "execute [markdown file]", + Args: cobra.MinimumNArgs(1), + Short: "Execute the commands for an Azure deployment scenario.", + Run: func(cmd *cobra.Command, args []string) { + markdownFile := args[0] + if markdownFile == "" { + logging.GlobalLogger.Errorf("Error: No markdown file specified.") + cmd.Help() + os.Exit(1) + } + + verbose, _ := cmd.Flags().GetBool("verbose") + doNotDelete, _ := cmd.Flags().GetBool("do-not-delete") + + subscription, _ := cmd.Flags().GetString("subscription") + correlationId, _ := cmd.Flags().GetString("correlation-id") + environment, _ := cmd.Flags().GetString("environment") + workingDirectory, _ := cmd.Flags().GetString("working-directory") + + environmentVariables, _ := cmd.Flags().GetStringArray("var") + features, _ := cmd.Flags().GetStringArray("feature") + + // Known features + renderValues := false + + // Parse the environment variables from the command line into a map + cliEnvironmentVariables := make(map[string]string) + for _, environmentVariable := range environmentVariables { + keyValuePair := strings.SplitN(environmentVariable, "=", 2) + if len(keyValuePair) != 2 { + logging.GlobalLogger.Errorf( + "Error: Invalid environment variable format: %s", + environmentVariable, + ) + fmt.Printf("Error: Invalid environment variable format: %s", environmentVariable) + cmd.Help() + os.Exit(1) + } + + cliEnvironmentVariables[keyValuePair[0]] = keyValuePair[1] + } + + for _, feature := range features { + switch feature { + case "render-values": + renderValues = true + default: + logging.GlobalLogger.Errorf( + "Error: Invalid feature: %s", + feature, + ) + fmt.Printf("Error: Invalid feature: %s\n", feature) + cmd.Help() + os.Exit(1) + } + } + + // Parse the markdown file and create a scenario + scenario, err := engine.CreateScenarioFromMarkdown( + markdownFile, + []string{"bash", "azurecli", "azurecli-interactive", "terraform"}, + cliEnvironmentVariables, + ) + if err != nil { + logging.GlobalLogger.Errorf("Error creating scenario: %s", err) + fmt.Printf("Error creating scenario: %s", err) + os.Exit(1) + } + + innovationEngine, err := engine.NewEngine(engine.EngineConfiguration{ + Verbose: verbose, + DoNotDelete: doNotDelete, + Subscription: subscription, + CorrelationId: correlationId, + Environment: environment, + WorkingDirectory: workingDirectory, + RenderValues: renderValues, + }) + + if err != nil { + logging.GlobalLogger.Errorf("Error creating engine: %s", err) + fmt.Printf("Error creating engine: %s", err) + os.Exit(1) + } + + // Execute the scenario + err = innovationEngine.ExecuteScenario(scenario) + if err != nil { + logging.GlobalLogger.Errorf("Error executing scenario: %s", err) + fmt.Printf("Error executing scenario: %s", err) + os.Exit(1) + } + }, +} diff --git a/cmd/ie/commands/interactive.go b/cmd/ie/commands/interactive.go new file mode 100644 index 00000000..064a3581 --- /dev/null +++ b/cmd/ie/commands/interactive.go @@ -0,0 +1,15 @@ +package commands + +import ( + "github.com/spf13/cobra" +) + +var interactiveCommand = &cobra.Command{ + Use: "interactive", + Short: "Execute a document in interactive mode.", +} + +// / Register the command with our command runner. 
+func init() { + rootCommand.AddCommand(interactiveCommand) +} diff --git a/cmd/ie/commands/root.go b/cmd/ie/commands/root.go new file mode 100644 index 00000000..9c4dcc4a --- /dev/null +++ b/cmd/ie/commands/root.go @@ -0,0 +1,56 @@ +package commands + +import ( + "fmt" + "os" + + "github.com/Azure/InnovationEngine/internal/engine/environments" + "github.com/Azure/InnovationEngine/internal/logging" + "github.com/spf13/cobra" +) + +// The root command for the CLI. Currently initializes the logging for all other +// commands. +var rootCommand = &cobra.Command{ + Use: "ie", + Short: "The innovation engine.", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + logLevel, err := cmd.Flags().GetString("log-level") + if err != nil { + fmt.Printf("Error getting log level: %s", err) + os.Exit(1) + } + logging.Init(logging.LevelFromString(logLevel)) + + // Check environment + environment, err := cmd.Flags().GetString("environment") + if err != nil { + fmt.Printf("Error getting environment: %s", err) + logging.GlobalLogger.Errorf("Error getting environment: %s", err) + os.Exit(1) + } + + if !environments.IsValidEnvironment(environment) { + fmt.Printf("Invalid environment: %s", environment) + logging.GlobalLogger.Errorf("Invalid environment: %s", environment) + os.Exit(1) + } + }, +} + +// Entrypoint into the Innovation Engine CLI. +func ExecuteCLI() { + rootCommand.PersistentFlags(). + String("log-level", string(logging.Debug), "Configure the log level") + rootCommand.PersistentFlags(). + String("environment", environments.EnvironmentsLocal, "The environment that the CLI is running in. (local, ci, ocd)") + + rootCommand.PersistentFlags(). + StringArray("feature", []string{}, "Enables the specified feature. Format: --feature ") + + if err := rootCommand.Execute(); err != nil { + fmt.Println(err) + logging.GlobalLogger.Errorf("Error executing command: %s", err) + os.Exit(1) + } +} diff --git a/cmd/ie/commands/test.go b/cmd/ie/commands/test.go new file mode 100644 index 00000000..f1ddcd8c --- /dev/null +++ b/cmd/ie/commands/test.go @@ -0,0 +1,63 @@ +package commands + +import ( + "fmt" + "os" + + "github.com/Azure/InnovationEngine/internal/engine" + "github.com/Azure/InnovationEngine/internal/logging" + "github.com/spf13/cobra" +) + +// / Register the command with our command runner. +func init() { + rootCommand.AddCommand(testCommand) + testCommand.PersistentFlags(). + Bool("verbose", false, "Enable verbose logging & standard output.") + testCommand.PersistentFlags(). + String("subscription", "", "Sets the subscription ID used by a scenario's azure-cli commands.
Will rely on the default subscription if not set.") +} + +var testCommand = &cobra.Command{ + Use: "test", + Args: cobra.MinimumNArgs(1), + Short: "Test document commands against their expected outputs.", + Run: func(cmd *cobra.Command, args []string) { + + markdownFile := args[0] + if markdownFile == "" { + cmd.Help() + return + } + + verbose, _ := cmd.Flags().GetBool("verbose") + subscription, _ := cmd.Flags().GetString("subscription") + + innovationEngine, err := engine.NewEngine(engine.EngineConfiguration{ + Verbose: verbose, + DoNotDelete: false, + Subscription: subscription, + CorrelationId: "", + }) + + if err != nil { + logging.GlobalLogger.Errorf("Error creating engine %s", err) + fmt.Printf("Error creating engine %s", err) + os.Exit(1) + } + + scenario, err := engine.CreateScenarioFromMarkdown( + markdownFile, + []string{"bash", "azurecli", "azurecli-interactive", "terraform"}, + nil, + ) + if err != nil { + logging.GlobalLogger.Errorf("Error creating scenario %s", err) + fmt.Printf("Error creating scenario %s", err) + os.Exit(1) + } + + innovationEngine.TestScenario(scenario) + + }, +} diff --git a/cmd/ie/commands/to-bash.go b/cmd/ie/commands/to-bash.go new file mode 100644 index 00000000..248e1427 --- /dev/null +++ b/cmd/ie/commands/to-bash.go @@ -0,0 +1,90 @@ +package commands + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/Azure/InnovationEngine/internal/engine" + "github.com/Azure/InnovationEngine/internal/engine/environments" + "github.com/Azure/InnovationEngine/internal/logging" + "github.com/spf13/cobra" +) + +type AzureScript struct { + Script string `json:"script"` +} + +var toBashCommand = &cobra.Command{ + Use: "to-bash", + Short: "Convert a markdown scenario into a bash script.", + RunE: func(cmd *cobra.Command, args []string) error { + markdownFile := args[0] + if markdownFile == "" { + logging.GlobalLogger.Errorf("Error: No markdown file specified.") + return errors.New("error: No markdown file specified") + } + + environment, _ := cmd.Flags().GetString("environment") + environmentVariables, _ := cmd.Flags().GetStringArray("var") + + // Parse the environment variables + cliEnvironmentVariables := make(map[string]string) + for _, environmentVariable := range environmentVariables { + keyValuePair := strings.SplitN(environmentVariable, "=", 2) + if len(keyValuePair) != 2 { + logging.GlobalLogger.Errorf( + "Error: Invalid environment variable format: %s", + environmentVariable, + ) + fmt.Printf("Error: Invalid environment variable format: %s", environmentVariable) + cmd.Help() + return fmt.Errorf( + "error: Invalid environment variable format, %s", + environmentVariable, + ) + } + + cliEnvironmentVariables[keyValuePair[0]] = keyValuePair[1] + } + + // Parse the markdown file and create a scenario + scenario, err := engine.CreateScenarioFromMarkdown( + markdownFile, + []string{"bash", "azurecli", "azurecli-interactive", "terraform"}, + cliEnvironmentVariables) + + if err != nil { + logging.GlobalLogger.Errorf("Error creating scenario: %s", err) + fmt.Printf("Error creating scenario: %s", err) + return err + } + + // If within cloudshell, we need to wrap the script in a json object to + // communicate it to the portal.
+ if environments.IsAzureEnvironment(environment) { + script := AzureScript{Script: scenario.ToShellScript()} + scriptJson, err := json.Marshal(script) + + if err != nil { + logging.GlobalLogger.Errorf("Error converting to json: %s", err) + fmt.Printf("Error converting to json: %s", err) + return err + } + + fmt.Printf("ie_us%sie_ue\n", scriptJson) + } else { + fmt.Printf("%s", scenario.ToShellScript()) + } + + return nil + + }, +} + +func init() { + rootCommand.AddCommand(toBashCommand) + toBashCommand.PersistentFlags(). + StringArray("var", []string{}, "Sets an environment variable for the scenario. Format: --var =") +} diff --git a/cmd/ie/ie.go b/cmd/ie/ie.go new file mode 100644 index 00000000..ea115e84 --- /dev/null +++ b/cmd/ie/ie.go @@ -0,0 +1,7 @@ +package main + +import "github.com/Azure/InnovationEngine/cmd/ie/commands" + +func main() { + commands.ExecuteCLI() +} diff --git a/cmd/runner/main.go b/cmd/runner/main.go new file mode 100644 index 00000000..e5a802c0 --- /dev/null +++ b/cmd/runner/main.go @@ -0,0 +1,13 @@ +package main + +import ( + "fmt" + "net/http" +) + +func main() { + fmt.Println("Hello, world!") + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Hello, world!") + }) +} diff --git a/docs/specs/00000-template.md b/docs/specs/00000-template.md new file mode 100644 index 00000000..e69de29b diff --git a/docs/specs/00001-expectation-system.md b/docs/specs/00001-expectation-system.md new file mode 100644 index 00000000..06c8a799 --- /dev/null +++ b/docs/specs/00001-expectation-system.md @@ -0,0 +1,150 @@ +# Setting the expectations around codeblock results + +## Introduction + +When ie executes a codeblock within a markdown document, the successful execution +of the command alone is usually an indication that we accomplished what the +document was aiming to achieve; however, it does not make any guarantees about +the results returned from the execution itself. For example, let's look at a +codeblock which performs an operation on virtual machines in Azure. The +command may look something like this: + +```bash +az vm create ... +``` + +And a successful output to that command may look like: + +```json +{ + "fqdns": "", + "id": "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/", + "identity": { + "systemAssignedIdentity": "", + "userAssignedIdentities": {} + }, + "location": "eastus", + "macAddress": "00-0D-3A-1C-6B-66", + "powerState": "VM running", + "privateIpAddress": "10.0.0.4", + "publicIpAddress": "172.178.12.226", + "resourceGroup": "", + "zones": "" +} +``` + +Looking through the output, we can see that there is a significant amount of +variation that can occur between multiple successful runs of the previous +command. However, most of the time we would only like to specify that the +output either looks like an output that we've seen before or that a +certain pattern can be found in the output itself. In order for the +Innovation Engine to be able to solve this problem, we need to create a system +that allows authors to quantify what they expect the result of a command to +look like. + +## Solution + +Most of the codeblocks that the Innovation Engine currently executes are written +in bash and run executables to carry out tasks on behalf of a user. Instead of +only relying on the exit code to determine that a codeblock successfully +executed, we must look at the side effects (output to stdout/stderr) produced +by the command too.
We will only perform comparisons when a command succeeds +and only from output sent to the standard output stream. If documentation +authors would like to make comparisons using the command output from standard +error, they can manually combine the standard output/error stream into the +output stream within the codeblock itself. + +To allow documentation authors to set their expectations around results, +we would like to introduce a new syntax to markdown documents in the form of +`expectation` tags. Expectation tags are HTML 5 comments that must be defined +inside of the markdown documents above a result block and below a codeblock. +Here's how that would look: + +````markdown +# Example codeblock + +```bash +echo "foo" +``` + + + +``` +foo +``` +```` + +IE takes the standard output from the execution of the +codeblock containing `echo "foo"` and compares it against the result codeblock +below our expectation tag using [Jaro Similarity](https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance) +as the metric for determining the two strings' similarity. The value provided by +the author sets the threshold for how similar the strings must be on a scale +of 0 to 1. + +If a documentation author instead wanted to ensure that a specific pattern +existed within the result of a command execution, they can change the parameters +`type` and `value` like so: + +````markdown +# Example codeblock + +```bash +echo "foo" + +``` + + + +``` +foo +``` +```` + +Instead of computing a similarity score, IE converts the `value` provided by +the author into a regular expression that is matched against the +output of `echo "foo"`. + +This system should cover most scenarios in which a document author would like +to set expectations around the value that is returned from the execution of +bash commands, but if more use cases arise then expanding on this implementation +only requires exposing new `type` and `value` parameter configurations. + +## Requirements + +- [ ] Support for comparing the similarity of the actual command output + against the document author's expected output codeblock given a threshold + for how similar the strings should be. +- [ ] Support for checking if a pattern exists within the actual command output + using regex. +- [ ] When an expectation fails, IE should give useful feedback about how to + report the issue to the document author. +- [ ] Update at least one of the documents used within the testing pipeline + to use the new expectation system. + +## Technical specifications + +- If the codeblock that represents the expected result is marked as `JSON`, IE + will attempt to parse the actual command output as JSON. If parsing the JSON + fails, the error will include information about what to report and where to + report it. +- When the similarity score is being computed for JSON codeblocks, the objects + are sorted alphabetically by key before the comparison is made so that the + similarity score that is computed is as accurate as possible. We do this because + we care more about the changes between the values inside the JSON objects + than we do the difference in key ordering. + +## Notes + +- This will deprecate the old tag `expected_similarity`. We will initially + retain support for it while we transition current documents towards using + `expectation` but will add warnings in the log file for authors using it.
+- A good but not absolute method to approximate what threshold should be used + for a similarity score check is to measure the ratio of static text to dynamic + text in the command output. For outputs where `static text > dynamic text`, + you should set a higher threshold for the comparison because there is + less text that changes between executions of the command. Conversely when + `static text < dynamic text`, you should set a lower thresold. + +## References + +- [Jaro Similarity](https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance) diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..06e39089 --- /dev/null +++ b/go.mod @@ -0,0 +1,73 @@ +module github.com/Azure/InnovationEngine + +go 1.20 + +require ( + github.com/charmbracelet/lipgloss v0.7.1 + github.com/google/uuid v1.3.0 + github.com/labstack/echo/v4 v4.10.2 + github.com/sergi/go-diff v1.3.1 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/cobra v1.7.0 + github.com/stretchr/testify v1.8.2 + github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 + github.com/yuin/goldmark v1.5.4 + golang.org/x/sys v0.13.0 + gopkg.in/ini.v1 v1.67.0 + k8s.io/api v0.27.1 + k8s.io/apimachinery v0.27.1 + k8s.io/client-go v0.27.1 +) + +require ( + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt v3.2.2+incompatible // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/labstack/gommon v0.4.0 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.18 // indirect + github.com/mattn/go-runewidth v0.0.14 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/muesli/reflow v0.3.0 // indirect + github.com/muesli/termenv v0.15.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rivo/uniseg v0.4.4 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasttemplate v1.2.2 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/klog/v2 v2.90.1 // indirect + k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect + 
k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..30e33ca1 --- /dev/null +++ b/go.sum @@ -0,0 +1,546 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 
h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/charmbracelet/lipgloss v0.7.1 h1:17WMwi7N1b1rVWOjMT+rCh7sQkvDU75B2hbZpc5Kc1E= +github.com/charmbracelet/lipgloss v0.7.1/go.mod h1:yG0k3giv8Qj8edTCbbg6AlQ5e8KNWpFujkNawKNhE2c= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp 
v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report 
v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/labstack/echo/v4 v4.10.2 h1:n1jAhnq/elIFTHr1EYpiYtyKgx4RW9ccVgkqByZaN2M= +github.com/labstack/echo/v4 v4.10.2/go.mod h1:OEyqf2//K1DFdE57vw2DRgWY0M7s65IVQO2FzvI4J5k= +github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8= +github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= +github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/muesli/termenv v0.15.1 h1:UzuTb/+hhlBugQz28rpzey4ZuKcZ03MeKsoG7IJZIxs= +github.com/muesli/termenv v0.15.1/go.mod h1:HeAQPTzpfs016yGtA4g00CsdYnVLJvxsS4ANqrZs2sQ= +github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= +github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= +github.com/valyala/fasttemplate v1.2.2/go.mod 
h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.5.4 h1:2uY/xC0roWy8IBEGLgB1ywIoEJFGmRrX21YQcvGZzjU= +github.com/yuin/goldmark v1.5.4/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.27.1 h1:Z6zUGQ1Vd10tJ+gHcNNNgkV5emCyW+v2XTmn+CLjSd0= +k8s.io/api v0.27.1/go.mod h1:z5g/BpAiD+f6AArpqNjkY+cji8ueZDU/WV1jcj5Jk4E= +k8s.io/apimachinery v0.27.1 h1:EGuZiLI95UQQcClhanryclaQE6xjg1Bts6/L3cD7zyc= +k8s.io/apimachinery v0.27.1/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM= +k8s.io/client-go v0.27.1 h1:oXsfhW/qncM1wDmWBIuDzRHNS2tLhK3BZv512Nc59W8= +k8s.io/client-go v0.27.1/go.mod h1:f8LHMUkVb3b9N8bWturc+EDtVVVwZ7ueTVquFAJb2vA= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg= +k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= +k8s.io/utils 
v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY= +k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/infra/api/Dockerfile b/infra/api/Dockerfile new file mode 100644 index 00000000..38bd2c7f --- /dev/null +++ b/infra/api/Dockerfile @@ -0,0 +1,17 @@ +FROM mcr.microsoft.com/cbl-mariner/base/core:2.0 + +ARG HOST=0.0.0.0 +ARG PORT=8080 + +WORKDIR /api + +RUN tdnf update && \ + tdnf install golang make ca-certificates -y + +COPY . . + +RUN make build-api + +EXPOSE 8080 + +CMD ["./bin/api"] \ No newline at end of file diff --git a/infra/api/deployment.yaml b/infra/api/deployment.yaml new file mode 100644 index 00000000..46151e87 --- /dev/null +++ b/infra/api/deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: innovation-engine-api +spec: + replicas: 2 + selector: + matchLabels: + app: innovation-engine-api + template: + metadata: + labels: + app: innovation-engine-api + spec: + containers: + - name: innovation-engine-api + image: innovation-engine-api:latest + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 \ No newline at end of file diff --git a/infra/api/ingress.yaml b/infra/api/ingress.yaml new file mode 100644 index 00000000..ec544529 --- /dev/null +++ b/infra/api/ingress.yaml @@ -0,0 +1,16 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: innovation-engine-api-ingress +spec: + rules: + - host: innovation-engine.localhost + http: + paths: + - path: "/" + pathType: Prefix + backend: + service: + name: innovation-engine-api-service + port: + number: 80 \ No newline at end of file diff --git a/infra/api/service.yaml b/infra/api/service.yaml new file mode 100644 index 00000000..938112f0 --- /dev/null +++ b/infra/api/service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: innovation-engine-api-service +spec: + selector: + app: innovation-engine-api + ports: + - protocol: TCP + port: 80 + targetPort: 8080 \ No newline at end of file diff --git a/infra/runner/Dockerfile b/infra/runner/Dockerfile new file mode 100644 index 00000000..3ccd4c1a --- /dev/null +++ b/infra/runner/Dockerfile @@ -0,0 +1,14 @@ +FROM mcr.microsoft.com/cbl-mariner/base/core:2.0 + +ARG HOST=0.0.0.0 +ARG PORT=8080 + +WORKDIR /api + +RUN tdnf install golang make -y + +COPY . . 
+ +RUN make build-runner + +CMD ["./bin/runner"] \ No newline at end of file diff --git a/internal/az/account.go b/internal/az/account.go new file mode 100644 index 00000000..25b6d443 --- /dev/null +++ b/internal/az/account.go @@ -0,0 +1,32 @@ +package az + +import ( + "fmt" + + "github.com/Azure/InnovationEngine/internal/logging" + "github.com/Azure/InnovationEngine/internal/shells" +) + +func SetSubscription(subscription string) error { + if subscription != "" { + command := fmt.Sprintf("az account set --subscription %s", subscription) + _, err := shells.ExecuteBashCommand( + command, + shells.BashCommandConfiguration{ + EnvironmentVariables: map[string]string{}, + InteractiveCommand: false, + WriteToHistory: false, + InheritEnvironment: false, + }, + ) + + if err != nil { + logging.GlobalLogger.Errorf("Failed to set subscription: %s", err) + return err + } + + logging.GlobalLogger.Infof("Set subscription to %s", subscription) + } + + return nil +} diff --git a/internal/az/env.go b/internal/az/env.go new file mode 100644 index 00000000..e66c3b30 --- /dev/null +++ b/internal/az/env.go @@ -0,0 +1,17 @@ +package az + +import ( + "fmt" + + "github.com/Azure/InnovationEngine/internal/logging" +) + +// If the correlation ID is set, we need to set the AZURE_HTTP_USER_AGENT +// environment variable so that the Azure CLI will send the correlation ID +// with Azure Resource Manager requests. +func SetCorrelationId(correlationId string, env map[string]string) { + if correlationId != "" { + env["AZURE_HTTP_USER_AGENT"] = fmt.Sprintf("innovation-engine-%s", correlationId) + logging.GlobalLogger.Info("Resource tracking enabled. Tracking ID: " + env["AZURE_HTTP_USER_AGENT"]) + } +} diff --git a/internal/az/env_test.go b/internal/az/env_test.go new file mode 100644 index 00000000..2ff973a6 --- /dev/null +++ b/internal/az/env_test.go @@ -0,0 +1,16 @@ +package az + +import ( + "testing" +) + +func TestSetCorrelationId(t *testing.T) { + t.Run("Test setting a custom correlation ID", func(t *testing.T) { + correlationId := "test-correlation-id" + env := map[string]string{} + SetCorrelationId(correlationId, env) + if env["AZURE_HTTP_USER_AGENT"] != "innovation-engine-test-correlation-id" { + t.Errorf("Expected AZURE_HTTP_USER_AGENT to be set to innovation-engine-test-correlation-id, got %s", env["AZURE_HTTP_USER_AGENT"]) + } + }) +} diff --git a/internal/az/group.go b/internal/az/group.go new file mode 100644 index 00000000..82cb9b6e --- /dev/null +++ b/internal/az/group.go @@ -0,0 +1,40 @@ +package az + +import ( + "github.com/Azure/InnovationEngine/internal/logging" + "github.com/Azure/InnovationEngine/internal/patterns" + "github.com/Azure/InnovationEngine/internal/shells" +) + +// Find all the deployed resources in a resource group. +func FindAllDeployedResourceURIs(resourceGroup string) []string { + output, err := shells.ExecuteBashCommand( + "az resource list -g "+resourceGroup, + shells.BashCommandConfiguration{ + EnvironmentVariables: map[string]string{}, + InheritEnvironment: true, + InteractiveCommand: false, + WriteToHistory: true, + }, + ) + + if err != nil { + logging.GlobalLogger.Error("Failed to list deployments", err) + } + + matches := patterns.AzResourceURI.FindAllStringSubmatch(output.StdOut, -1) + results := []string{} + for _, match := range matches { + results = append(results, match[1]) + } + return results +} + +// Find the resource group name from the output of an az command. 
+func FindResourceGroupName(commandOutput string) string { + matches := patterns.AzResourceGroupName.FindStringSubmatch(commandOutput) + if len(matches) > 1 { + return matches[1] + } + return "" +} diff --git a/internal/engine/common.go b/internal/engine/common.go new file mode 100644 index 00000000..23e75d8d --- /dev/null +++ b/internal/engine/common.go @@ -0,0 +1,67 @@ +package engine + +import ( + "fmt" + "strings" + + "github.com/Azure/InnovationEngine/internal/lib" + "github.com/Azure/InnovationEngine/internal/logging" + "github.com/Azure/InnovationEngine/internal/ui" + "github.com/xrash/smetrics" +) + +// Indents a multi-line command to be nested under the first line of the +// command. +func indentMultiLineCommand(content string, indentation int) string { + lines := strings.Split(content, "\n") + for i := 1; i < len(lines); i++ { + if strings.HasSuffix(strings.TrimSpace(lines[i-1]), "\\") { + lines[i] = strings.Repeat(" ", indentation) + lines[i] + } else if strings.TrimSpace(lines[i]) != "" { + lines[i] = strings.Repeat(" ", indentation) + lines[i] + } + + } + return strings.Join(lines, "\n") +} + +// Compares the actual output of a command to the expected output of a command. +func compareCommandOutputs( + actualOutput string, + expectedOutput string, + expectedSimilarity float64, + expectedOutputLanguage string, +) error { + if strings.ToLower(expectedOutputLanguage) == "json" { + logging.GlobalLogger.Debugf( + "Comparing JSON strings:\nExpected: %s\nActual%s", + expectedOutput, + actualOutput, + ) + results, err := lib.CompareJsonStrings(actualOutput, expectedOutput, expectedSimilarity) + + if err != nil { + return err + } + + if !results.AboveThreshold { + return fmt.Errorf( + ui.ErrorMessageStyle.Render("Expected output does not match actual output."), + ) + } + + logging.GlobalLogger.Debugf( + "Expected Similarity: %f, Actual Similarity: %f", + expectedSimilarity, + results.Score, + ) + } else { + score := smetrics.JaroWinkler(expectedOutput, actualOutput, 0.7, 4) + + if expectedSimilarity > score { + return fmt.Errorf(ui.ErrorMessageStyle.Render("Expected output does not match actual output.")) + } + } + + return nil +} diff --git a/internal/engine/engine.go b/internal/engine/engine.go new file mode 100644 index 00000000..843cdef6 --- /dev/null +++ b/internal/engine/engine.go @@ -0,0 +1,56 @@ +package engine + +import ( + "fmt" + + "github.com/Azure/InnovationEngine/internal/az" + "github.com/Azure/InnovationEngine/internal/lib" + "github.com/Azure/InnovationEngine/internal/lib/fs" + "github.com/Azure/InnovationEngine/internal/ui" +) + +// Configuration for the engine. +type EngineConfiguration struct { + Verbose bool + DoNotDelete bool + CorrelationId string + Subscription string + Environment string + WorkingDirectory string + RenderValues bool +} + +type Engine struct { + Configuration EngineConfiguration +} + +// / Create a new engine instance. +func NewEngine(configuration EngineConfiguration) (*Engine, error) { + return &Engine{ + Configuration: configuration, + }, nil +} + +// Executes a deployment scenario. +func (e *Engine) ExecuteScenario(scenario *Scenario) error { + return fs.UsingDirectory(e.Configuration.WorkingDirectory, func() error { + az.SetCorrelationId(e.Configuration.CorrelationId, scenario.Environment) + + // Execute the steps + fmt.Println(ui.ScenarioTitleStyle.Render(scenario.Name)) + err := e.ExecuteAndRenderSteps(scenario.Steps, lib.CopyMap(scenario.Environment)) + return err + }) +} + +// Validates a deployment scenario. 
+func (e *Engine) TestScenario(scenario *Scenario) error { + return fs.UsingDirectory(e.Configuration.WorkingDirectory, func() error { + az.SetCorrelationId(e.Configuration.CorrelationId, scenario.Environment) + + // Test the steps + fmt.Println(ui.ScenarioTitleStyle.Render(scenario.Name)) + err := e.TestSteps(scenario.Steps, lib.CopyMap(scenario.Environment)) + return err + }) +} diff --git a/internal/engine/environments/azure.go b/internal/engine/environments/azure.go new file mode 100644 index 00000000..0526abdd --- /dev/null +++ b/internal/engine/environments/azure.go @@ -0,0 +1,100 @@ +package environments + +import ( + "encoding/json" + "fmt" + + "github.com/Azure/InnovationEngine/internal/az" + "github.com/Azure/InnovationEngine/internal/logging" + "github.com/Azure/InnovationEngine/internal/ui" +) + +// / The status of a one-click deployment. +type AzureDeploymentStatus struct { + Steps []string `json:"steps"` + CurrentStep int `json:"currentStep"` + Status string `json:"status"` + ResourceURIs []string `json:"resourceURIs"` + Error string `json:"error"` +} + +func NewAzureDeploymentStatus() AzureDeploymentStatus { + return AzureDeploymentStatus{ + Steps: []string{}, + CurrentStep: 0, + Status: "Executing", + ResourceURIs: []string{}, + Error: "", + } +} + +// Get the status as a JSON string. +func (status *AzureDeploymentStatus) AsJsonString() (string, error) { + json, err := json.Marshal(status) + if err != nil { + logging.GlobalLogger.Error("Failed to marshal status", err) + return "", err + } + + return string(json), nil +} + +func (status *AzureDeploymentStatus) AddStep(step string) { + status.Steps = append(status.Steps, step) +} + +func (status *AzureDeploymentStatus) AddResourceURI(uri string) { + status.ResourceURIs = append(status.ResourceURIs, uri) +} + +func (status *AzureDeploymentStatus) SetError(err error) { + status.Status = "Failed" + status.Error = err.Error() +} + +// Print out the status JSON for azure/cloudshell if in the correct environment. +func ReportAzureStatus(status AzureDeploymentStatus, environment string) { + if !IsAzureEnvironment(environment) { + return + } + + statusJson, err := status.AsJsonString() + if err != nil { + logging.GlobalLogger.Error("Failed to marshal status", err) + } else { + // We add these strings to the output so that the portal can find and parse + // the JSON status. + ocdStatus := fmt.Sprintf("ie_us%sie_ue\n", statusJson) + fmt.Println(ui.OcdStatusUpdateStyle.Render(ocdStatus)) + } +} + +// Attach deployed resource URIs to the one click deployment status if we're in +// the correct environment & we have a resource group name. +func AttachResourceURIsToAzureStatus( + status *AzureDeploymentStatus, + resourceGroupName string, + environment string, +) { + + if !IsAzureEnvironment(environment) { + logging.GlobalLogger.Info( + "Not fetching resource URIs because we're not in the OCD environment.", + ) + } + + if resourceGroupName == "" { + logging.GlobalLogger.Warn("No resource group name found.") + return + } + + resourceURIs := az.FindAllDeployedResourceURIs(resourceGroupName) + + if len(resourceURIs) > 0 { + logging.GlobalLogger.WithField("resourceURIs", resourceURIs). 
+ Info("Found deployed resources.") + status.ResourceURIs = resourceURIs + } else { + logging.GlobalLogger.Warn("No deployed resources found.") + } +} diff --git a/internal/engine/environments/environments.go b/internal/engine/environments/environments.go new file mode 100644 index 00000000..c6c9dc45 --- /dev/null +++ b/internal/engine/environments/environments.go @@ -0,0 +1,22 @@ +package environments + +const ( + EnvironmentsLocal = "local" + EnvironmentsCI = "ci" + EnvironmentsOCD = "ocd" + EnvironmentsAzure = "azure" +) + +// Check if the environment is valid. +func IsValidEnvironment(environment string) bool { + switch environment { + case EnvironmentsLocal, EnvironmentsCI, EnvironmentsOCD, EnvironmentsAzure: + return true + default: + return false + } +} + +func IsAzureEnvironment(environment string) bool { + return environment == EnvironmentsAzure || environment == EnvironmentsOCD +} diff --git a/internal/engine/execution.go b/internal/engine/execution.go new file mode 100644 index 00000000..a72d2d00 --- /dev/null +++ b/internal/engine/execution.go @@ -0,0 +1,304 @@ +package engine + +import ( + "fmt" + "strings" + "time" + + "github.com/Azure/InnovationEngine/internal/az" + "github.com/Azure/InnovationEngine/internal/engine/environments" + "github.com/Azure/InnovationEngine/internal/lib" + "github.com/Azure/InnovationEngine/internal/logging" + "github.com/Azure/InnovationEngine/internal/parsers" + "github.com/Azure/InnovationEngine/internal/patterns" + "github.com/Azure/InnovationEngine/internal/shells" + "github.com/Azure/InnovationEngine/internal/terminal" + "github.com/Azure/InnovationEngine/internal/ui" +) + +const ( + // TODO - Make this configurable for terminals that support it. + // spinnerFrames = `⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏` + spinnerFrames = `-\|/` + spinnerRefresh = 100 * time.Millisecond +) + +// If a scenario has an `az group delete` command and the `--do-not-delete` +// flag is set, we remove it from the steps. +func filterDeletionCommands(steps []Step, preserveResources bool) []Step { + filteredSteps := []Step{} + if preserveResources { + for _, step := range steps { + newBlocks := []parsers.CodeBlock{} + for _, block := range step.CodeBlocks { + if patterns.AzGroupDelete.MatchString(block.Content) { + continue + } else { + newBlocks = append(newBlocks, block) + } + } + if len(newBlocks) > -1 { + filteredSteps = append(filteredSteps, Step{ + Name: step.Name, + CodeBlocks: newBlocks, + }) + } + } + } else { + filteredSteps = steps + } + return filteredSteps +} + +func renderCommand(blockContent string) (shells.CommandOutput, error) { + escapedCommand := blockContent + if !patterns.MultilineQuotedStringCommand.MatchString(blockContent) { + escapedCommand = strings.ReplaceAll(blockContent, "\\\n", "\\\\\n") + } + renderedCommand, err := shells.ExecuteBashCommand( + "echo -e \""+escapedCommand+"\"", + shells.BashCommandConfiguration{ + EnvironmentVariables: map[string]string{}, + InteractiveCommand: false, + WriteToHistory: false, + InheritEnvironment: true, + }, + ) + return renderedCommand, err +} + +// Executes the steps from a scenario and renders the output to the terminal. 
+func (e *Engine) ExecuteAndRenderSteps(steps []Step, env map[string]string) error { + + var resourceGroupName string = "" + var azureStatus = environments.NewAzureDeploymentStatus() + + err := az.SetSubscription(e.Configuration.Subscription) + if err != nil { + logging.GlobalLogger.Errorf("Invalid Config: Failed to set subscription: %s", err) + azureStatus.SetError(err) + environments.ReportAzureStatus(azureStatus, e.Configuration.Environment) + return err + } + + stepsToExecute := filterDeletionCommands(steps, e.Configuration.DoNotDelete) + + for stepNumber, step := range stepsToExecute { + azureStatus.AddStep(fmt.Sprintf("%d. %s", stepNumber+1, step.Name)) + } + + environments.ReportAzureStatus(azureStatus, e.Configuration.Environment) + + for stepNumber, step := range stepsToExecute { + stepTitle := fmt.Sprintf("%d. %s\n", stepNumber+1, step.Name) + fmt.Println(ui.StepTitleStyle.Render(stepTitle)) + azureStatus.CurrentStep = stepNumber + 1 + + for _, block := range step.CodeBlocks { + var finalCommandOutput string + if e.Configuration.RenderValues { + // Render the codeblock. + renderedCommand, err := renderCommand(block.Content) + if err != nil { + logging.GlobalLogger.Errorf("Failed to render command: %s", err.Error()) + azureStatus.SetError(err) + environments.ReportAzureStatus(azureStatus, e.Configuration.Environment) + return err + } + finalCommandOutput = indentMultiLineCommand(renderedCommand.StdOut, 4) + } else { + finalCommandOutput = indentMultiLineCommand(block.Content, 4) + } + + fmt.Print(" " + finalCommandOutput) + + // execute the command as a goroutine to allow for the spinner to be + // rendered while the command is executing. + done := make(chan error) + var commandOutput shells.CommandOutput + + // If the command is an SSH command, we need to forward the input and + // output + interactiveCommand := false + if patterns.SshCommand.MatchString(block.Content) { + interactiveCommand = true + } + + logging.GlobalLogger.WithField("isInteractive", interactiveCommand). + Infof("Executing command: %s", block.Content) + + var commandErr error + var frame int = 0 + + // If forwarding input/output, don't render the spinner. + if !interactiveCommand { + // Grab the number of lines it contains & set the cursor to the + // beginning of the block. + + lines := strings.Count(finalCommandOutput, "\n") + terminal.MoveCursorPositionUp(lines) + + // Render the spinner and hide the cursor. + fmt.Print(ui.SpinnerStyle.Render(" "+string(spinnerFrames[0])) + " ") + terminal.HideCursor() + + go func(block parsers.CodeBlock) { + output, err := shells.ExecuteBashCommand( + block.Content, + shells.BashCommandConfiguration{ + EnvironmentVariables: lib.CopyMap(env), + InheritEnvironment: true, + InteractiveCommand: false, + WriteToHistory: true, + }, + ) + logging.GlobalLogger.Infof("Command output to stdout:\n %s", output.StdOut) + logging.GlobalLogger.Infof("Command output to stderr:\n %s", output.StdErr) + commandOutput = output + done <- err + }(block) + renderingLoop: + // While the command is executing, render the spinner. + for { + select { + case commandErr = <-done: + // Show the cursor, check the result of the command, and display the + // final status. 
+ terminal.ShowCursor() + + if commandErr == nil { + + actualOutput := commandOutput.StdOut + expectedOutput := block.ExpectedOutput.Content + expectedSimilarity := block.ExpectedOutput.ExpectedSimilarity + expectedOutputLanguage := block.ExpectedOutput.Language + + outputComparisonError := compareCommandOutputs(actualOutput, expectedOutput, expectedSimilarity, expectedOutputLanguage) + + if outputComparisonError != nil { + logging.GlobalLogger.Errorf("Error comparing command outputs: %s", outputComparisonError.Error()) + fmt.Printf("\r %s \n", ui.ErrorStyle.Render("✗")) + terminal.MoveCursorPositionDown(lines) + fmt.Printf(" %s\n", ui.ErrorMessageStyle.Render(outputComparisonError.Error())) + fmt.Printf(" %s\n", lib.GetDifferenceBetweenStrings(block.ExpectedOutput.Content, commandOutput.StdOut)) + + azureStatus.SetError(outputComparisonError) + environments.ReportAzureStatus(azureStatus, e.Configuration.Environment) + + return outputComparisonError + } + + fmt.Printf("\r %s \n", ui.CheckStyle.Render("✔")) + terminal.MoveCursorPositionDown(lines) + + fmt.Printf("%s\n", ui.RemoveHorizontalAlign(ui.VerboseStyle.Render(commandOutput.StdOut))) + + // Extract the resource group name from the command output if + // it's not already set. + if resourceGroupName == "" && patterns.AzCommand.MatchString(block.Content) { + logging.GlobalLogger.Info("Attempting to extract resource group name from command output") + tmpResourceGroup := az.FindResourceGroupName(commandOutput.StdOut) + if tmpResourceGroup != "" { + logging.GlobalLogger.WithField("resourceGroup", tmpResourceGroup).Info("Found resource group") + resourceGroupName = tmpResourceGroup + } + } + + if stepNumber != len(stepsToExecute)-1 { + environments.ReportAzureStatus(azureStatus, e.Configuration.Environment) + } + + } else { + terminal.ShowCursor() + fmt.Printf("\r %s \n", ui.ErrorStyle.Render("✗")) + terminal.MoveCursorPositionDown(lines) + fmt.Printf(" %s\n", ui.ErrorMessageStyle.Render(commandErr.Error())) + + logging.GlobalLogger.Errorf("Error executing command: %s", commandErr.Error()) + + azureStatus.SetError(commandErr) + environments.ReportAzureStatus(azureStatus, e.Configuration.Environment) + + return commandErr + } + + break renderingLoop + default: + frame = (frame + 1) % len(spinnerFrames) + fmt.Printf("\r %s", ui.SpinnerStyle.Render(string(spinnerFrames[frame]))) + time.Sleep(spinnerRefresh) + } + } + } else { + lines := strings.Count(block.Content, "\n") + + // If we're on the last step and the command is an SSH command, we need + // to report the status before executing the command. This is needed for + // one click deployments and does not affect the normal execution flow. 
+ if stepNumber == len(stepsToExecute)-1 && patterns.SshCommand.MatchString(block.Content) { + azureStatus.Status = "Succeeded" + environments.AttachResourceURIsToAzureStatus(&azureStatus, resourceGroupName, e.Configuration.Environment) + environments.ReportAzureStatus(azureStatus, e.Configuration.Environment) + } + + output, commandExecutionError := shells.ExecuteBashCommand( + block.Content, + shells.BashCommandConfiguration{ + EnvironmentVariables: lib.CopyMap(env), + InheritEnvironment: true, + InteractiveCommand: true, + WriteToHistory: false, + }, + ) + + terminal.ShowCursor() + + if commandExecutionError == nil { + fmt.Printf("\r %s \n", ui.CheckStyle.Render("✔")) + terminal.MoveCursorPositionDown(lines) + + fmt.Printf(" %s\n", ui.VerboseStyle.Render(output.StdOut)) + + if stepNumber != len(stepsToExecute)-1 { + environments.ReportAzureStatus(azureStatus, e.Configuration.Environment) + } + } else { + fmt.Printf("\r %s \n", ui.ErrorStyle.Render("✗")) + terminal.MoveCursorPositionDown(lines) + fmt.Printf(" %s\n", ui.ErrorMessageStyle.Render(commandExecutionError.Error())) + + azureStatus.SetError(commandExecutionError) + environments.ReportAzureStatus(azureStatus, e.Configuration.Environment) + return commandExecutionError + } + } + } + } + + // Report the final status of the deployment (Only applies to one click deployments). + azureStatus.Status = "Succeeded" + environments.AttachResourceURIsToAzureStatus( + &azureStatus, + resourceGroupName, + e.Configuration.Environment, + ) + environments.ReportAzureStatus(azureStatus, e.Configuration.Environment) + + switch e.Configuration.Environment { + case environments.EnvironmentsAzure, environments.EnvironmentsOCD: + logging.GlobalLogger.Info( + "Cleaning environment variable file located at /tmp/env-vars", + ) + err := shells.CleanEnvironmentStateFile() + + if err != nil { + logging.GlobalLogger.Errorf("Error cleaning environment variables: %s", err.Error()) + return err + } + + default: + shells.ResetStoredEnvironmentVariables() + } + + return nil +} diff --git a/internal/engine/execution_test.go b/internal/engine/execution_test.go new file mode 100644 index 00000000..875229cd --- /dev/null +++ b/internal/engine/execution_test.go @@ -0,0 +1,24 @@ +package engine + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestExecuteBlock(t *testing.T) { + blocks := []string{ + "echo \"hello \\\nworld\"", // tutorial.md + "echo hello \\\nworld", + "echo \"hello world\"", + "echo hello world", + "ls \\\n-a", + } + for _, blockCommand := range blocks { + t.Run("render command", func(t *testing.T) { + _, err := renderCommand(blockCommand) + assert.Equal(t, nil, err) + }) + } + +} diff --git a/internal/engine/scenario.go b/internal/engine/scenario.go new file mode 100644 index 00000000..b847b51c --- /dev/null +++ b/internal/engine/scenario.go @@ -0,0 +1,195 @@ +package engine + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/Azure/InnovationEngine/internal/lib" + "github.com/Azure/InnovationEngine/internal/lib/fs" + "github.com/Azure/InnovationEngine/internal/logging" + "github.com/Azure/InnovationEngine/internal/parsers" + "github.com/yuin/goldmark/ast" +) + +// Individual steps within a scenario. +type Step struct { + Name string + CodeBlocks []parsers.CodeBlock +} + +// Scenarios are the top-level object that represents a scenario to be executed. 
+type Scenario struct { + Name string + MarkdownAst ast.Node + Steps []Step + Environment map[string]string +} + +// Groups the codeblocks into steps based on the header of the codeblock. +// This organizes the codeblocks into steps that can be executed linearly. +func groupCodeBlocksIntoSteps(blocks []parsers.CodeBlock) []Step { + var groupedSteps []Step + var headerIndex = make(map[string]int) + + for _, block := range blocks { + if index, ok := headerIndex[block.Header]; ok { + groupedSteps[index].CodeBlocks = append(groupedSteps[index].CodeBlocks, block) + } else { + headerIndex[block.Header] = len(groupedSteps) + groupedSteps = append(groupedSteps, Step{ + Name: block.Header, + CodeBlocks: []parsers.CodeBlock{block}, + }) + } + } + + return groupedSteps +} + +// Creates a scenario object from a given markdown file. languagesToExecute is +// used to filter out code blocks that should not be parsed out of the markdown +// file. +func CreateScenarioFromMarkdown( + path string, + languagesToExecute []string, + environmentVariableOverrides map[string]string, +) (*Scenario, error) { + if !fs.FileExists(path) { + return nil, fmt.Errorf("markdown file '%s' does not exist", path) + } + + source, err := os.ReadFile(path) + if err != nil { + panic(err) + } + + // Load environment variables + markdownINI := strings.TrimSuffix(path, filepath.Ext(path)) + ".ini" + environmentVariables := make(map[string]string) + + // Check if the INI file exists & load it. + if !fs.FileExists(markdownINI) { + logging.GlobalLogger.Infof("INI file '%s' does not exist, skipping...", markdownINI) + } else { + logging.GlobalLogger.Infof("INI file '%s' exists, loading...", markdownINI) + environmentVariables, err = parsers.ParseINIFile(markdownINI) + + if err != nil { + return nil, err + } + + for key, value := range environmentVariables { + logging.GlobalLogger.Debugf("Setting %s=%s\n", key, value) + } + } + + // Convert the markdonw into an AST and extract the scenario variables. + markdown := parsers.ParseMarkdownIntoAst(source) + scenarioVariables := parsers.ExtractScenarioVariablesFromAst(markdown, source) + for key, value := range scenarioVariables { + environmentVariables[key] = value + } + + // Extract the code blocks from the markdown file. + codeBlocks := parsers.ExtractCodeBlocksFromAst(markdown, source, languagesToExecute) + logging.GlobalLogger.WithField("CodeBlocks", codeBlocks). 
+ Debugf("Found %d code blocks", len(codeBlocks)) + + varsToExport := lib.CopyMap(environmentVariableOverrides) + for key, value := range environmentVariableOverrides { + logging.GlobalLogger.Debugf("Attempting to override %s with %s", key, value) + exportRegex := regexp.MustCompile(fmt.Sprintf(`export %s=["']?([a-z-A-Z0-9_]+)["']?`, key)) + + for index, codeBlock := range codeBlocks { + matches := exportRegex.FindAllStringSubmatch(codeBlock.Content, -1) + + if len(matches) != 0 { + logging.GlobalLogger.Debugf( + "Found %d matches for %s, deleting from varsToExport", + len(matches), + key, + ) + delete(varsToExport, key) + } else { + logging.GlobalLogger.Debugf("Found no matches for %s inside of %s", key, codeBlock.Content) + } + + for _, match := range matches { + oldLine := match[0] + oldValue := match[1] + + // Replace the old export with the new export statement + newLine := strings.Replace(oldLine, oldValue, value, 1) + logging.GlobalLogger.Debugf("Replacing '%s' with '%s'", oldLine, newLine) + + // Update the code block with the new export statement + codeBlocks[index].Content = strings.Replace(codeBlock.Content, oldLine, newLine, 1) + } + + } + } + + // If there are some variables left after going through each of the codeblocks, + // do not update the scenario + // steps. + if len(varsToExport) != 0 { + logging.GlobalLogger.Debugf( + "Found %d variables to add to the scenario as a step.", + len(varsToExport), + ) + exportCodeBlock := parsers.CodeBlock{ + Language: "bash", + Content: "", + Header: "Exporting variables defined via the CLI and not in the markdown file.", + ExpectedOutput: parsers.ExpectedOutputBlock{}, + } + for key, value := range varsToExport { + exportCodeBlock.Content += fmt.Sprintf("export %s=\"%s\"\n", key, value) + } + + codeBlocks = append([]parsers.CodeBlock{exportCodeBlock}, codeBlocks...) + } + + // Group the code blocks into steps. 
+ steps := groupCodeBlocksIntoSteps(codeBlocks) + title, err := parsers.ExtractScenarioTitleFromAst(markdown, source) + if err != nil { + return nil, err + } + + logging.GlobalLogger.Infof("Successfully built out the scenario: %s", title) + + return &Scenario{ + Name: title, + Environment: environmentVariables, + Steps: steps, + MarkdownAst: markdown, + }, nil +} + +func (s *Scenario) OverwriteEnvironmentVariables(environmentVariables map[string]string) { + for key, value := range environmentVariables { + s.Environment[key] = value + } +} + +// Convert a scenario into a shell script +func (s *Scenario) ToShellScript() string { + var script strings.Builder + + for key, value := range s.Environment { + script.WriteString(fmt.Sprintf("export %s=\"%s\"\n", key, value)) + } + + for _, step := range s.Steps { + script.WriteString(fmt.Sprintf("# %s\n", step.Name)) + for _, block := range step.CodeBlocks { + script.WriteString(fmt.Sprintf("%s\n", block.Content)) + } + } + + return script.String() +} diff --git a/internal/engine/testing.go b/internal/engine/testing.go new file mode 100644 index 00000000..414e4a74 --- /dev/null +++ b/internal/engine/testing.go @@ -0,0 +1,133 @@ +package engine + +import ( + "errors" + "fmt" + "time" + + "github.com/Azure/InnovationEngine/internal/az" + "github.com/Azure/InnovationEngine/internal/lib" + "github.com/Azure/InnovationEngine/internal/logging" + "github.com/Azure/InnovationEngine/internal/parsers" + "github.com/Azure/InnovationEngine/internal/patterns" + "github.com/Azure/InnovationEngine/internal/shells" + "github.com/Azure/InnovationEngine/internal/terminal" + "github.com/Azure/InnovationEngine/internal/ui" +) + +func (e *Engine) TestSteps(steps []Step, env map[string]string) error { + var resourceGroupName string + stepsToExecute := filterDeletionCommands(steps, true) + err := az.SetSubscription(e.Configuration.Subscription) + + if err != nil { + logging.GlobalLogger.Errorf("Invalid Config: Failed to set subscription: %s", err) + return err + } + + var testRunnerError error = nil +testRunner: + for stepNumber, step := range stepsToExecute { + stepTitle := fmt.Sprintf(" %d. %s\n", stepNumber+1, step.Name) + fmt.Println(ui.StepTitleStyle.Render(stepTitle)) + terminal.MoveCursorPositionUp(1) + terminal.HideCursor() + + for _, block := range step.CodeBlocks { + // execute the command as a goroutine to allow for the spinner to be + // rendered while the command is executing. + done := make(chan error) + var commandOutput shells.CommandOutput + go func(block parsers.CodeBlock) { + logging.GlobalLogger.Infof("Executing command: %s", block.Content) + output, err := shells.ExecuteBashCommand(block.Content, shells.BashCommandConfiguration{EnvironmentVariables: lib.CopyMap(env), InheritEnvironment: true, InteractiveCommand: false, WriteToHistory: true}) + logging.GlobalLogger.Infof("Command stdout: %s", output.StdOut) + logging.GlobalLogger.Infof("Command stderr: %s", output.StdErr) + commandOutput = output + done <- err + }(block) + + frame := 0 + var err error + + loop: + // While the command is executing, render the spinner. 
+ for { + select { + case err = <-done: + terminal.ShowCursor() + + if err == nil { + actualOutput := commandOutput.StdOut + expectedOutput := block.ExpectedOutput.Content + expectedSimilarity := block.ExpectedOutput.ExpectedSimilarity + expectedOutputLanguage := block.ExpectedOutput.Language + + err := compareCommandOutputs(actualOutput, expectedOutput, expectedSimilarity, expectedOutputLanguage) + + if err != nil { + logging.GlobalLogger.Errorf("Error comparing command outputs: %s", err.Error()) + fmt.Print(ui.ErrorStyle.Render("Error when comparing the command outputs: %s\n", err.Error())) + } + + // Extract the resource group name from the command output if + // it's not already set. + if resourceGroupName == "" && patterns.AzCommand.MatchString(block.Content) { + tmpResourceGroup := az.FindResourceGroupName(commandOutput.StdOut) + if tmpResourceGroup != "" { + logging.GlobalLogger.Infof("Found resource group: %s", tmpResourceGroup) + resourceGroupName = tmpResourceGroup + } + } + + fmt.Printf("\r %s \n", ui.CheckStyle.Render("✔")) + terminal.MoveCursorPositionDown(1) + } else { + + fmt.Printf("\r %s \n", ui.ErrorStyle.Render("✗")) + terminal.MoveCursorPositionDown(1) + fmt.Printf(" %s\n", ui.ErrorStyle.Render("Error executing command: %s\n", err.Error())) + + logging.GlobalLogger.Errorf("Error executing command: %s", err.Error()) + + testRunnerError = err + break testRunner + } + + break loop + default: + frame = (frame + 1) % len(spinnerFrames) + fmt.Printf("\r %s", ui.SpinnerStyle.Render(string(spinnerFrames[frame]))) + time.Sleep(spinnerRefresh) + } + } + } + } + + // If the resource group name was set, delete it. + if resourceGroupName != "" { + fmt.Printf("\n") + fmt.Printf("Deleting resource group: %s\n", resourceGroupName) + command := fmt.Sprintf("az group delete --name %s --yes", resourceGroupName) + output, err := shells.ExecuteBashCommand( + command, + shells.BashCommandConfiguration{ + EnvironmentVariables: lib.CopyMap(env), + InheritEnvironment: true, + InteractiveCommand: false, + WriteToHistory: true, + }, + ) + + if err != nil { + fmt.Print(ui.ErrorStyle.Render("Error deleting resource group: %s\n", err.Error())) + logging.GlobalLogger.Errorf("Error deleting resource group: %s", err.Error()) + testRunnerError = errors.Join(testRunnerError, err) + } + + fmt.Print(output.StdOut) + } + + shells.ResetStoredEnvironmentVariables() + return testRunnerError +} diff --git a/internal/kube/client.go b/internal/kube/client.go new file mode 100644 index 00000000..f26601da --- /dev/null +++ b/internal/kube/client.go @@ -0,0 +1,37 @@ +package kube + +import ( + "os" + "path/filepath" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// Obtains the Kubernetes clientset based on the environment +// this function is executing in. 
+func GetKubernetesClient() (*kubernetes.Clientset, error) { + var config *rest.Config + var err error + + if _, err := rest.InClusterConfig(); err != nil { + kubeConfig := filepath.Join(os.Getenv("HOME"), ".kube", "config") + config, err = clientcmd.BuildConfigFromFlags("", kubeConfig) + if err != nil { + return nil, err + } + } else { + config, err = rest.InClusterConfig() + if err != nil { + return nil, err + } + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + + return clientset, nil +} diff --git a/internal/kube/deployments.go b/internal/kube/deployments.go new file mode 100644 index 00000000..89ccc3cf --- /dev/null +++ b/internal/kube/deployments.go @@ -0,0 +1,56 @@ +package kube + +import ( + "context" + + "github.com/Azure/InnovationEngine/internal/lib" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +func GetAgentDeployment(id string) *appsv1.Deployment { + + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "runner-" + id, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: lib.Int32Ptr(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "runner", + "id": id, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "runner", + "id": id, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "innovation-engine-runner", + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: 8080, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + }, + }, + } +} + +func CreateAgentDeployment(clientset *kubernetes.Clientset, deployment *appsv1.Deployment) (*appsv1.Deployment, error) { + return clientset.AppsV1().Deployments("default").Create(context.TODO(), deployment, metav1.CreateOptions{}) +} diff --git a/internal/kube/services.go b/internal/kube/services.go new file mode 100644 index 00000000..39c6cbfa --- /dev/null +++ b/internal/kube/services.go @@ -0,0 +1,34 @@ +package kube + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +func GetAgentService(id string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "runner - " + id, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "app": "runner", + "id": id, + }, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: 8080, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } +} + +func CreateAgentService(clientset *kubernetes.Clientset, service *corev1.Service) (*corev1.Service, error) { + return clientset.CoreV1().Services("default").Create(context.TODO(), service, metav1.CreateOptions{}) +} diff --git a/internal/lib/diff.go b/internal/lib/diff.go new file mode 100644 index 00000000..d595b1bd --- /dev/null +++ b/internal/lib/diff.go @@ -0,0 +1,12 @@ +package lib + +import ( + "github.com/sergi/go-diff/diffmatchpatch" +) + +func GetDifferenceBetweenStrings(a, b string) string { + dmp := diffmatchpatch.New() + + diffs := dmp.DiffMain(a, b, false) + return dmp.DiffPrettyText(diffs) +} diff --git a/internal/lib/fs/directories.go b/internal/lib/fs/directories.go new file mode 100644 index 00000000..f6211761 --- /dev/null +++ b/internal/lib/fs/directories.go @@ -0,0 +1,41 @@ +package fs + +import ( + "errors" + "os" + + "github.com/Azure/InnovationEngine/internal/logging" +) + +func 
SetWorkingDirectory(directory string) error { + // Change working directory if specified + if directory != "" { + err := os.Chdir(directory) + if err != nil { + logging.GlobalLogger.Error("Failed to change working directory", err) + return err + } + + logging.GlobalLogger.Infof("Changed directory to %s", directory) + } + return nil +} + +// Executes a function within a given working directory and restores +// the original working directory when the function completes. +func UsingDirectory(directory string, executor func() error) error { + originalDirectory, err := os.Getwd() + if err != nil { + return err + } + + err = SetWorkingDirectory(directory) + if err != nil { + return err + } + + executionError := executor() + err = SetWorkingDirectory(originalDirectory) + + return errors.Join(executionError, err) +} diff --git a/internal/lib/fs/file.go b/internal/lib/fs/file.go new file mode 100644 index 00000000..36f44860 --- /dev/null +++ b/internal/lib/fs/file.go @@ -0,0 +1,9 @@ +package fs + +import "os" + +// Checks if a given file exists. +func FileExists(path string) bool { + _, err := os.Stat(path) + return !os.IsNotExist(err) +} diff --git a/internal/lib/ints.go b/internal/lib/ints.go new file mode 100644 index 00000000..5b0d834c --- /dev/null +++ b/internal/lib/ints.go @@ -0,0 +1,3 @@ +package lib + +func Int32Ptr(i int32) *int32 { return &i } diff --git a/internal/lib/json.go b/internal/lib/json.go new file mode 100644 index 00000000..a5c1f875 --- /dev/null +++ b/internal/lib/json.go @@ -0,0 +1,52 @@ +package lib + +import ( + "encoding/json" + + "github.com/xrash/smetrics" +) + +func OrderJsonFields(jsonStr string) (string, error) { + expectedMap := make(map[string]interface{}) + err := json.Unmarshal([]byte(jsonStr), &expectedMap) + if err != nil { + return "", err + } + + orderedJson, err := json.Marshal(expectedMap) + if err != nil { + return "", err + } + return string(orderedJson), nil +} + +type ComparisonResult struct { + AboveThreshold bool + Score float64 +} + +// Compare two JSON strings by ordering the fields alphabetically and then +// comparing the strings using the Jaro-Winkler algorithm to compute a score. +// If the score is greater than the threshold, return true. +func CompareJsonStrings( + actualJson string, + expectedJson string, + threshold float64, +) (ComparisonResult, error) { + actualOutput, err := OrderJsonFields(actualJson) + if err != nil { + return ComparisonResult{}, err + } + + expectedOutput, err := OrderJsonFields(expectedJson) + if err != nil { + return ComparisonResult{}, err + } + + score := smetrics.Jaro(actualOutput, expectedOutput) + + return ComparisonResult{ + AboveThreshold: score >= threshold, + Score: score, + }, nil +} diff --git a/internal/lib/json_test.go b/internal/lib/json_test.go new file mode 100644 index 00000000..55c21f80 --- /dev/null +++ b/internal/lib/json_test.go @@ -0,0 +1 @@ +package lib diff --git a/internal/lib/maps.go b/internal/lib/maps.go new file mode 100644 index 00000000..7e4388a5 --- /dev/null +++ b/internal/lib/maps.go @@ -0,0 +1,20 @@ +package lib + +// Makes a copy of a map +func CopyMap(m map[string]string) map[string]string { + result := make(map[string]string) + for k, v := range m { + result[k] = v + } + return result +} + +// Merge two maps together. 
+func MergeMaps(a, b map[string]string) map[string]string { + merged := CopyMap(a) + for k, v := range b { + merged[k] = v + } + + return merged +} diff --git a/internal/lib/maps_test.go b/internal/lib/maps_test.go new file mode 100644 index 00000000..fe68eec5 --- /dev/null +++ b/internal/lib/maps_test.go @@ -0,0 +1,46 @@ +package lib + +import ( + "testing" +) + +func TestMapUtilities(t *testing.T) { + + t.Run("Copying maps", func(t *testing.T) { + original := make(map[string]string) + original["key"] = "value" + + copy := CopyMap(original) + + if len(copy) != 1 { + t.Errorf("Copy length is wrong: %d", len(copy)) + } + + if copy["key"] != "value" { + t.Errorf("Copy is wrong: %s", copy["key"]) + } + }) + + t.Run("Merging maps", func(t *testing.T) { + original := make(map[string]string) + original["key"] = "value" + + merge := make(map[string]string) + merge["key2"] = "value2" + + merged := MergeMaps(original, merge) + + if len(merged) != 2 { + t.Errorf("Merged length is wrong: %d", len(merged)) + } + + if merged["key"] != "value" { + t.Errorf("Merged is wrong: %s", merged["key"]) + } + + if merged["key2"] != "value2" { + t.Errorf("Merged is wrong: %s", merged["key2"]) + } + }) + +} diff --git a/internal/lib/user.go b/internal/lib/user.go new file mode 100644 index 00000000..6066aaa9 --- /dev/null +++ b/internal/lib/user.go @@ -0,0 +1,30 @@ +package lib + +import ( + "fmt" + "os" + "os/user" +) + +func GetHomeDirectory() (string, error) { + // Try to get home directory from user.Current() + usr, err := user.Current() + if err == nil { + return usr.HomeDir, nil + } + + // Fallback to environment variable + home, exists := os.LookupEnv("HOME") + if exists && home != "" { + return home, nil + } + + // Fallback for Windows + homeDrive, driveExists := os.LookupEnv("HOMEDRIVE") + homePath, pathExists := os.LookupEnv("HOMEPATH") + if driveExists && pathExists { + return homeDrive + homePath, nil + } + + return "", fmt.Errorf("home directory cannot be determined") +} diff --git a/internal/logging/logging.go b/internal/logging/logging.go new file mode 100644 index 00000000..e8fd9c45 --- /dev/null +++ b/internal/logging/logging.go @@ -0,0 +1,80 @@ +package logging + +import ( + "os" + + "github.com/sirupsen/logrus" +) + +type Level string + +const ( + Trace Level = "trace" + Debug Level = "debug" + Info Level = "info" + Warn Level = "warn" + Error Level = "error" + Fatal Level = "fatal" +) + +// / Convert a logging level to a logrus level (uint32). +func (l Level) Integer() logrus.Level { + switch l { + case Trace: + return logrus.TraceLevel + case Debug: + return logrus.DebugLevel + case Info: + return logrus.InfoLevel + case Warn: + return logrus.WarnLevel + case Error: + return logrus.ErrorLevel + case Fatal: + return logrus.FatalLevel + default: + return logrus.InfoLevel + } +} + +// / Convert a string to a logging level. 
+func LevelFromString(level string) Level { + switch level { + case string(Trace): + return Trace + case string(Debug): + return Debug + case string(Info): + return Info + case string(Warn): + return Warn + case string(Error): + return Error + case string(Fatal): + return Fatal + default: + return Info + } +} + +var GlobalLogger = logrus.New() + +func Init(level Level) { + GlobalLogger.SetFormatter(&logrus.TextFormatter{ + DisableColors: false, + FullTimestamp: true, + DisableQuote: true, + }) + + GlobalLogger.SetReportCaller(false) + GlobalLogger.SetLevel(level.Integer()) + + file, err := os.OpenFile("ie.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + + if err == nil { + GlobalLogger.SetOutput(file) + } else { + GlobalLogger.SetOutput(os.Stdout) + GlobalLogger.Warn("Failed to log to file, using default stderr") + } +} diff --git a/internal/parsers/ini.go b/internal/parsers/ini.go new file mode 100644 index 00000000..f4cb58ff --- /dev/null +++ b/internal/parsers/ini.go @@ -0,0 +1,27 @@ +package parsers + +import ( + "fmt" + + "gopkg.in/ini.v1" +) + +// Parses an INI file into a flat map of keys mapped to values. This reduces +// the complexity of the INI file to a simple key/value store and ignores the +// sections. +func ParseINIFile(filePath string) (map[string]string, error) { + + iniFile, err := ini.Load(filePath) + + if err != nil { + return nil, fmt.Errorf("failed to read the INI file %s because %v", filePath, err) + } + + data := make(map[string]string) + for _, section := range iniFile.Sections() { + for key, value := range section.KeysHash() { + data[key] = value + } + } + return data, nil +} diff --git a/internal/parsers/ini_test.go b/internal/parsers/ini_test.go new file mode 100644 index 00000000..9cdcaa38 --- /dev/null +++ b/internal/parsers/ini_test.go @@ -0,0 +1,41 @@ +package parsers + +import ( + "os" + "testing" +) + +func TestParsingINIFiles(t *testing.T) { + + t.Run("INI with valid contents", func(t *testing.T) { + tempFile, err := os.CreateTemp("", "test") + + if err != nil { + t.Errorf("Error creating temp file: %s", err) + } + + defer os.Remove(tempFile.Name()) + + contents := []byte(`[section] + key=value`) + + if _, err := tempFile.Write(contents); err != nil { + t.Errorf("Error writing to temp file: %s", err) + } + + data, err := ParseINIFile(tempFile.Name()) + + if err != nil { + t.Errorf("Error parsing INI file: %s", err) + } + + if len(data) != 1 { + t.Errorf("Data length is wrong: %d", len(data)) + } + + if data["key"] != "value" { + t.Errorf("Data is wrong: %s", data["key"]) + } + }) + +} diff --git a/internal/parsers/markdown.go b/internal/parsers/markdown.go new file mode 100644 index 00000000..d99ddb38 --- /dev/null +++ b/internal/parsers/markdown.go @@ -0,0 +1,212 @@ +package parsers + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/Azure/InnovationEngine/internal/logging" + "github.com/yuin/goldmark" + "github.com/yuin/goldmark/ast" + "github.com/yuin/goldmark/extension" + "github.com/yuin/goldmark/parser" + "github.com/yuin/goldmark/renderer/html" + "github.com/yuin/goldmark/text" +) + +var markdownParser = goldmark.New( + goldmark.WithExtensions(extension.GFM), + goldmark.WithParserOptions( + parser.WithAutoHeadingID(), + parser.WithBlockParsers(), + ), + goldmark.WithRendererOptions( + html.WithXHTML(), + ), +) + +// Parses a markdown file into an AST representing the markdown document. 
+func ParseMarkdownIntoAst(source []byte) ast.Node { + document := markdownParser.Parser().Parse(text.NewReader(source)) + return document +} + +// The representation of an expected output block in a markdown file. This is +// for scenarios that have expected output that should be validated against the +// actual output. +type ExpectedOutputBlock struct { + Language string + Content string + ExpectedSimilarity float64 +} + +// The representation of a code block in a markdown file. +type CodeBlock struct { + Language string + Content string + Header string + ExpectedOutput ExpectedOutputBlock +} + +// Assumes the title of the scenario is the first h1 header in the +// markdown file. +func ExtractScenarioTitleFromAst(node ast.Node, source []byte) (string, error) { + header := "" + ast.Walk(node, func(node ast.Node, entering bool) (ast.WalkStatus, error) { + if entering { + switch n := node.(type) { + case *ast.Heading: + if n.Level == 1 { + header = string(extractTextFromMarkdown(&n.BaseBlock, source)) + return ast.WalkStop, nil + } + } + } + return ast.WalkContinue, nil + }) + + if header == "" { + return "", fmt.Errorf("no header found") + } + + return header, nil +} + +var expectedSimilarityRegex = regexp.MustCompile(``) + +// Extracts the code blocks from a provided markdown AST that match the +// languagesToExtract. +func ExtractCodeBlocksFromAst( + node ast.Node, + source []byte, + languagesToExtract []string, +) []CodeBlock { + var lastHeader string + var commands []CodeBlock + var nextBlockIsExpectedOutput bool + var lastExpectedSimilarityScore float64 + + ast.Walk(node, func(node ast.Node, entering bool) (ast.WalkStatus, error) { + if entering { + switch n := node.(type) { + // Set the last header when we encounter a heading. + case *ast.Heading: + lastHeader = string(extractTextFromMarkdown(&n.BaseBlock, source)) + // Extract the code block if it matches the language. + case *ast.HTMLBlock: + content := extractTextFromMarkdown(&n.BaseBlock, source) + match := expectedSimilarityRegex.FindStringSubmatch(content) + + // TODO(vmarcella): Add better error handling for when the + // score isn't parsable as a float. + if match != nil { + score, err := strconv.ParseFloat(match[1], 64) + logging.GlobalLogger.Debugf("Simalrity score of %f found", score) + if err != nil { + return ast.WalkStop, err + } + lastExpectedSimilarityScore = score + nextBlockIsExpectedOutput = true + } + + case *ast.FencedCodeBlock: + language := string(n.Language((source))) + for _, desiredLanguage := range languagesToExtract { + if language == desiredLanguage { + command := CodeBlock{ + Language: language, + Content: extractTextFromMarkdown(&n.BaseBlock, source), + Header: lastHeader, + } + commands = append(commands, command) + break + } else if nextBlockIsExpectedOutput { + // Map the expected output to the last command. If there + // are no commands, then we ignore the expected output. + if len(commands) > 0 { + expectedOutputBlock := ExpectedOutputBlock{ + Language: language, + Content: extractTextFromMarkdown(&n.BaseBlock, source), + ExpectedSimilarity: lastExpectedSimilarityScore, + } + commands[len(commands)-1].ExpectedOutput = expectedOutputBlock + + // Reset the expected output state. + nextBlockIsExpectedOutput = false + lastExpectedSimilarityScore = 0 + } + break + } + } + } + } + return ast.WalkContinue, nil + }) + + return commands +} + +// This regex matches HTML comments within markdown blocks that contain +// variables to use within the scenario. 
+var variableCommentBlockRegex = regexp.MustCompile("(?s) +```output +{ + "id": "/subscriptions/325e7c34-99fb-4190-aa87-1df746c67705/resourceGroups/aksQuickstartResourceGroup", + "location": "eastus", + "managedBy": null, + "name": "aksQuickstartResourceGroup", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null, + "type": "Microsoft.Resources/resourceGroups" +} +``` + +Now we can create an AKS cluster within that resource group. + +```azurecli-interactive +az aks create --resource-group $RESOURCE_GROUP_NAME --name $AKS_CLUSTER_NAME --location $RESOURCE_LOCATION --node-count 1 --generate-ssh-keys +``` + +This will take a little while to run, when it completes you should see an output that looks something like this: + + +```output +{ + "aadProfile": null, + "addonProfiles": null, + "agentPoolProfiles": [ + { + "availabilityZones": null, + "count": 1, + "creationData": null, + "currentOrchestratorVersion": "1.24.9", + "enableAutoScaling": false, + "enableEncryptionAtHost": false, + "enableFips": false, + "enableNodePublicIp": false, + "enableUltraSsd": false, + "gpuInstanceProfile": null, + "hostGroupId": null, + "kubeletConfig": null, + "kubeletDiskType": "OS", + "linuxOsConfig": null, + "maxCount": null, + "maxPods": 110, + "minCount": null, + "mode": "System", + "name": "nodepool1", + "nodeImageVersion": "AKSUbuntu-1804gen2containerd-2023.01.20", + "nodeLabels": null, + "nodePublicIpPrefixId": null, + "nodeTaints": null, + "orchestratorVersion": "1.24.9", + "osDiskSizeGb": 128, + "osDiskType": "Managed", + "osSku": "Ubuntu", + "osType": "Linux", + "podSubnetId": null, + "powerState": { + "code": "Running" + }, + "provisioningState": "Succeeded", + "proximityPlacementGroupId": null, + "scaleDownMode": null, + "scaleSetEvictionPolicy": null, + "scaleSetPriority": null, + "spotMaxPrice": null, + "tags": null, + "type": "VirtualMachineScaleSets", + "upgradeSettings": { + "maxSurge": null + }, + "vmSize": "Standard_DS2_v2", + "vnetSubnetId": null, + "workloadRuntime": null + } + ], + "apiServerAccessProfile": null, + "autoScalerProfile": null, + "autoUpgradeProfile": null, + "azurePortalFqdn": "aksquickst-aksquickstartres-325e7c-784c55cf.portal.hcp.eastus.azmk8s.io", + "currentKubernetesVersion": "1.24.9", + "disableLocalAccounts": false, + "diskEncryptionSetId": null, + "dnsPrefix": "aksQuickst-aksQuickstartRes-325e7c", + "enablePodSecurityPolicy": null, + "enableRbac": true, + "extendedLocation": null, + "fqdn": "aksquickst-aksquickstartres-325e7c-784c55cf.hcp.eastus.azmk8s.io", + "fqdnSubdomain": null, + "httpProxyConfig": null, + "id": "/subscriptions/325e7c34-99fb-4190-aa87-1df746c67705/resourcegroups/aksQuickstartResourceGroup/providers/Microsoft.ContainerService/managedClusters/aksQuickstartCluster", + "identity": { + "principalId": "REDACTED", + "tenantId": "REDACTED", + "type": "SystemAssigned", + "userAssignedIdentities": null + }, + "identityProfile": { + "kubeletidentity": { + "clientId": "REDACTED", + "objectId": "REDACTED", + "resourceId": "/subscriptions/REDACTED/resourcegroups/MC_aksQuickstartResourceGroup_aksQuickstartCluster_eastus/providers/Microsoft.ManagedIdentity/userAssignedIdentities/aksQuickstartCluster-agentpool" + } + }, + "kubernetesVersion": "1.24.9", + "linuxProfile": { + "adminUsername": "azureuser", + "ssh": { + "publicKeys": [ + { + "keyData": "ssh-rsa REDACTED" + } + ] + } + }, + "location": "eastus", + "maxAgentPools": 100, + "name": "aksQuickstartCluster", + "networkProfile": { + "dnsServiceIp": "10.0.0.10", + "dockerBridgeCidr": 
"172.17.0.1/16", + "ipFamilies": [ + "IPv4" + ], + "loadBalancerProfile": { + "allocatedOutboundPorts": null, + "effectiveOutboundIPs": [ + { + "id": "/subscriptions/REDACTED/resourceGroups/MC_aksQuickstartResourceGroup_aksQuickstartCluster_eastus/providers/Microsoft.Network/publicIPAddresses/e19ddc6c-0842-45d5-814d-702cc95945ce", + "resourceGroup": "MC_aksQuickstartResourceGroup_aksQuickstartCluster_eastus" + } + ], + "enableMultipleStandardLoadBalancers": null, + "idleTimeoutInMinutes": null, + "managedOutboundIPs": { + "count": 1, + "countIpv6": null + }, + "outboundIPs": null, + "outboundIpPrefixes": null + }, + "loadBalancerSku": "Standard", + "natGatewayProfile": null, + "networkMode": null, + "networkPlugin": "kubenet", + "networkPolicy": null, + "outboundType": "loadBalancer", + "podCidr": "10.244.0.0/16", + "podCidrs": [ + "10.244.0.0/16" + ], + "serviceCidr": "10.0.0.0/16", + "serviceCidrs": [ + "10.0.0.0/16" + ] + }, + "nodeResourceGroup": "MC_aksQuickstartResourceGroup_aksQuickstartCluster_eastus", + "oidcIssuerProfile": { + "enabled": false, + "issuerUrl": null + }, + "podIdentityProfile": null, + "powerState": { + "code": "Running" + }, + "privateFqdn": null, + "privateLinkResources": null, + "provisioningState": "Succeeded", + "publicNetworkAccess": null, + "resourceGroup": "aksQuickstartResourceGroup", + "securityProfile": { + "azureKeyVaultKms": null, + "defender": null + }, + "servicePrincipalProfile": { + "clientId": "msi", + "secret": null + }, + "sku": { + "name": "Basic", + "tier": "Free" + }, + "storageProfile": { + "blobCsiDriver": null, + "diskCsiDriver": { + "enabled": true + }, + "fileCsiDriver": { + "enabled": true + }, + "snapshotController": { + "enabled": true + } + }, + "systemData": null, + "tags": null, + "type": "Microsoft.ContainerService/ManagedClusters", + "windowsProfile": null +} +``` + +### [Azure PowerShell](#tab/azure-powershell) + +Create an AKS cluster using the [New-AzAksCluster][new-azakscluster] command. The following example creates a resource group *MyResourceGroup* and a cluster named *MyAKS* with one node in the *MyResourceGroup* resource group: + +```azurepowershell-interactive +New-AzResourceGroup -Name MyResourceGroup -Location eastus +New-AzAksCluster -ResourceGroupName MyResourceGroup -Name MyAKS -Location eastus -NodeCount 1 -GenerateSshKey +``` + +--- + +## Subscribe to AKS events + +### [Azure CLI](#tab/azure-cli) + +Create a namespace and event hub using [az eventhubs namespace create][az-eventhubs-namespace-create] and [az eventhubs eventhub create][az-eventhubs-eventhub-create]. The following example creates a namespace *MyNamespace* and an event hub *MyEventGridHub* in *MyNamespace*, both in the *MyResourceGroup* resource group. 
+ +```azurecli-interactive +az eventhubs namespace create --location $RESOURCE_LOCATION --name $NAMESPACE_NAME --resource-group $RESOURCE_GROUP_NAME +``` + + +```output +{ + "alternateName": null, + "clusterArmId": null, + "createdAt": "2023-02-11T00:27:48.977000+00:00", + "disableLocalAuth": false, + "encryption": null, + "id": "/subscriptions/325e7c34-99fb-4190-aa87-1df746c67705/resourceGroups/aksQuickstartResourceGroup/providers/Microsoft.EventHub/namespaces/aksQuickstartNamespace00021677", + "identity": null, + "isAutoInflateEnabled": false, + "kafkaEnabled": true, + "location": "East US", + "maximumThroughputUnits": 0, + "metricId": "325e7c34-99fb-4190-aa87-1df746c67705:aksquickstartnamespace00021677", + "minimumTlsVersion": "1.2", + "name": "aksQuickstartNamespace00021677", + "privateEndpointConnections": null, + "provisioningState": "Succeeded", + "publicNetworkAccess": "Enabled", + "resourceGroup": "aksQuickstartResourceGroup", + "serviceBusEndpoint": "https://aksQuickstartNamespace00021677.servicebus.windows.net:443/", + "sku": { + "capacity": 1, + "name": "Standard", + "tier": "Standard" + }, + "status": "Active", + "systemData": null, + "tags": {}, + "type": "Microsoft.EventHub/Namespaces", + "updatedAt": "2023-02-11T00:28:40.050000+00:00", + "zoneRedundant": false +} +``` + +```azurecli-interactive +az eventhubs eventhub create --name $EVENT_GRID_HUB_NAME --namespace-name $NAMESPACE_NAME --resource-group $RESOURCE_GROUP_NAME +``` + + +```output +{ + "alternateName": null, + "clusterArmId": null, + "createdAt": "2023-02-11T00:27:48.977000+00:00", + "disableLocalAuth": false, + "encryption": null, + "id": "/subscriptions/325e7c34-99fb-4190-aa87-1df746c67705/resourceGroups/aksQuickstartResourceGroup/providers/Microsoft.EventHub/namespaces/aksQuickstartNamespace00021677", + "identity": null, + "isAutoInflateEnabled": false, + "kafkaEnabled": true, + "location": "East US", + "maximumThroughputUnits": 0, + "metricId": "325e7c34-99fb-4190-aa87-1df746c67705:aksquickstartnamespace00021677", + "minimumTlsVersion": "1.2", + "name": "aksQuickstartNamespace00021677", + "privateEndpointConnections": null, + "provisioningState": "Succeeded", + "publicNetworkAccess": "Enabled", + "resourceGroup": "aksQuickstartResourceGroup", + "serviceBusEndpoint": "https://aksQuickstartNamespace00021677.servicebus.windows.net:443/", + "sku": { + "capacity": 1, + "name": "Standard", + "tier": "Standard" + }, + "status": "Active", + "systemData": null, + "tags": {}, + "type": "Microsoft.EventHub/Namespaces", + "updatedAt": "2023-02-11T00:29:54.450000+00:00", + "zoneRedundant": false +} +``` + +> [!NOTE] +> The *name* of your namespace must be unique. In the defaults above we set a random postfix to try to ensure it is unique, but this is not guaranteed. 
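
As an illustration of that note, a unique name can be produced by appending a random suffix to the namespace variable before running the commands above. This is only a sketch: `NAMESPACE_NAME` is the variable consumed by the `az eventhubs namespace create` command, and the suffix format shown here is an assumption rather than the exact default used by this scenario.

```bash
# Illustrative only: derive a (probably) unique Event Hubs namespace name by
# appending a random numeric suffix. NAMESPACE_NAME is the same variable used
# by the `az eventhubs namespace create` command above.
export NAMESPACE_NAME="aksQuickstartNamespace$(shuf -i 10000000-99999999 -n 1)"
echo "Event Hubs namespace will be created as: $NAMESPACE_NAME"
```

Re-exporting the variable this way only affects the current shell session, so it must happen before the namespace is created.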
+ +Subscribe to the AKS events using [az eventgrid event-subscription create][az-eventgrid-event-subscription-create]: + +First we need the resource ID and endpoint, which we will store in an environment variables for later use: + +```azurecli-interactive +SOURCE_RESOURCE_ID=$(az aks show -g $RESOURCE_GROUP_NAME -n $AKS_CLUSTER_NAME --query id --output tsv) +ENDPOINT=$(az eventhubs eventhub show -g $RESOURCE_GROUP_NAME -n $EVENT_GRID_HUB_NAME --namespace-name $NAMESPACE_NAME --query id --output tsv) +``` + +Now we can actually subscribe to the events: + +```azurecli-interactive +az eventgrid event-subscription create --name $EVENT_GRID_SUBSCRIPTION_NAME \ + --source-resource-id $SOURCE_RESOURCE_ID \ + --endpoint-type eventhub \ + --endpoint $ENDPOINT +``` + + +```output +{ + "deadLetterDestination": null, + "deadLetterWithResourceIdentity": null, + "deliveryWithResourceIdentity": null, + "destination": { + "deliveryAttributeMappings": null, + "endpointType": "EventHub", + "resourceId": "/subscriptions/REDACTED/resourceGroups/aksQuickstartResourceGroup/providers/Microsoft.EventHub/namespaces/aksQuickstartNamespace00006800/eventhubs/aksQuickstartEventGridHub" + }, + "eventDeliverySchema": "EventGridSchema", + "expirationTimeUtc": null, + "filter": { + "advancedFilters": null, + "enableAdvancedFilteringOnArrays": null, + "includedEventTypes": [ + "Microsoft.ContainerService.NewKubernetesVersionAvailable" + ], + "isSubjectCaseSensitive": null, + "subjectBeginsWith": "", + "subjectEndsWith": "" + }, + "id": "/subscriptions/REDACTED/resourceGroups/aksQuickstartResourceGroup/providers/Microsoft.ContainerService/managedClusters/aksQuickstartCluster/providers/Microsoft.EventGrid/eventSubscriptions/aksQuickstartEventGridSubscription", + "labels": null, + "name": "aksQuickstartEventGridSubscription", + "provisioningState": "Succeeded", + "resourceGroup": "aksQuickstartResourceGroup", + "retryPolicy": { + "eventTimeToLiveInMinutes": 1440, + "maxDeliveryAttempts": 30 + }, + "systemData": null, + "topic": "/subscriptions/REDACTED/resourceGroups/aksquickstartresourcegroup/providers/microsoft.containerservice/managedclusters/aksquickstartcluster", + "type": "Microsoft.EventGrid/eventSubscriptions" +} +``` + +Verify your subscription to AKS events using `az eventgrid event-subscription list`: + +```azurecli-interactive +az eventgrid event-subscription list --source-resource-id $SOURCE_RESOURCE_ID +``` + +The following example output shows you're subscribed to events from the *MyAKS* cluster and those events are delivered to the *MyEventGridHub* event hub: + +```output +[ + { + "deadLetterDestination": null, + "deadLetterWithResourceIdentity": null, + "deliveryWithResourceIdentity": null, + "destination": { + "deliveryAttributeMappings": null, + "endpointType": "EventHub", + "resourceId": "/subscriptions/SUBSCRIPTION_ID/resourceGroups/MyResourceGroup/providers/Microsoft.EventHub/namespaces/MyNamespace/eventhubs/MyEventGridHub" + }, + "eventDeliverySchema": "EventGridSchema", + "expirationTimeUtc": null, + "filter": { + "advancedFilters": null, + "enableAdvancedFilteringOnArrays": null, + "includedEventTypes": [ + "Microsoft.ContainerService.NewKubernetesVersionAvailable" + ], + "isSubjectCaseSensitive": null, + "subjectBeginsWith": "", + "subjectEndsWith": "" + }, + "id": "/subscriptions/SUBSCRIPTION_ID/resourceGroups/MyResourceGroup/providers/Microsoft.ContainerService/managedClusters/MyAKS/providers/Microsoft.EventGrid/eventSubscriptions/MyEventGridSubscription", + "labels": null, + "name": 
"MyEventGridSubscription", + "provisioningState": "Succeeded", + "resourceGroup": "MyResourceGroup", + "retryPolicy": { + "eventTimeToLiveInMinutes": 1440, + "maxDeliveryAttempts": 30 + }, + "systemData": null, + "topic": "/subscriptions/SUBSCRIPTION_ID/resourceGroups/MyResourceGroup/providers/microsoft.containerservice/managedclusters/MyAKS", + "type": "Microsoft.EventGrid/eventSubscriptions" + } +] +``` + +### [Azure PowerShell](#tab/azure-powershell) + +Create a namespace and event hub using [New-AzEventHubNamespace][new-azeventhubnamespace] and [New-AzEventHub][new-azeventhub]. The following example creates a namespace *MyNamespace* and an event hub *MyEventGridHub* in *MyNamespace*, both in the *MyResourceGroup* resource group. + +```azurepowershell-interactive +New-AzEventHubNamespace -Location eastus -Name MyNamespace -ResourceGroupName $RESOURCE_GROUP_NAME +New-AzEventHub -Name MyEventGridHub -Namespace MyNamespace -ResourceGroupName $RESOURCE_GROUP_NAME +``` + +> [!NOTE] +> The *name* of your namespace must be unique. + +Subscribe to the AKS events using [New-AzEventGridSubscription][new-azeventgridsubscription]: + +```azurepowershell-interactive +$SOURCE_RESOURCE_ID = (Get-AzAksCluster -ResourceGroupName MyResourceGroup -Name MyAKS).Id +$ENDPOINT = (Get-AzEventHub -ResourceGroupName MyResourceGroup -EventHubName MyEventGridHub -Namespace MyNamespace).Id +$params = @{ + EventSubscriptionName = 'MyEventGridSubscription' + ResourceId = $SOURCE_RESOURCE_ID + EndpointType = 'eventhub' + Endpoint = $ENDPOINT +} +New-AzEventGridSubscription @params +``` + +Verify your subscription to AKS events using `Get-AzEventGridSubscription`: + +```azurepowershell-interactive +Get-AzEventGridSubscription -ResourceId $SOURCE_RESOURCE_ID | Select-Object -ExpandProperty PSEventSubscriptionsList +``` + +The following example output shows you're subscribed to events from the *MyAKS* cluster and those events are delivered to the *MyEventGridHub* event hub: + +```Output +EventSubscriptionName : MyEventGridSubscription +Id : /subscriptions/SUBSCRIPTION_ID/resourceGroups/MyResourceGroup/providers/Microsoft.ContainerService/managedClusters/MyAKS/providers/Microsoft.EventGrid/eventSubscriptions/MyEventGridSubscription +Type : Microsoft.EventGrid/eventSubscriptions +Topic : /subscriptions/SUBSCRIPTION_ID/resourceGroups/myresourcegroup/providers/microsoft.containerservice/managedclusters/myaks +Filter : Microsoft.Azure.Management.EventGrid.Models.EventSubscriptionFilter +Destination : Microsoft.Azure.Management.EventGrid.Models.EventHubEventSubscriptionDestination +ProvisioningState : Succeeded +Labels : +EventTtl : 1440 +MaxDeliveryAttempt : 30 +EventDeliverySchema : EventGridSchema +ExpirationDate : +DeadLetterEndpoint : +Endpoint : /subscriptions/SUBSCRIPTION_ID/resourceGroups/MyResourceGroup/providers/Microsoft.EventHub/namespaces/MyNamespace/eventhubs/MyEventGridHub +``` + +--- + +When AKS events occur, you'll see those events appear in your event hub. For example, when the list of available Kubernetes versions for your clusters changes, you'll see a `Microsoft.ContainerService.NewKubernetesVersionAvailable` event. For more information on the events AKS emits, see [Azure Kubernetes Service (AKS) as an Event Grid source][aks-events]. + +## Delete the cluster and subscriptions + +### [Azure CLI](#tab/azure-cli) + +Use the [az group delete][az-group-delete] command to remove the resource group, the AKS cluster, namespace, and event hub, and all related resources. 
+ +```azurecli-interactive +az group delete --name $RESOURCE_GROUP_NAME --yes --no-wait +``` + +### [Azure PowerShell](#tab/azure-powershell) + +Use the [Remove-AzResourceGroup][remove-azresourcegroup] cmdlet to remove the resource group, the AKS cluster, namespace, and event hub, and all related resources. + +```azurepowershell-interactive +Remove-AzResourceGroup -Name MyResourceGroup +``` + +--- + +> [!NOTE] +> When you delete the cluster, the Azure Active Directory service principal used by the AKS cluster is not removed. For steps on how to remove the service principal, see [AKS service principal considerations and deletion][sp-delete]. +> +> If you used a managed identity, the identity is managed by the platform and does not require removal. + +## Next steps + +In this quickstart, you deployed a Kubernetes cluster and then subscribed to AKS events in Azure Event Hubs. + +To learn more about AKS, and walk through a complete code to deployment example, continue to the Kubernetes cluster tutorial. + +> [!div class="nextstepaction"] +> [AKS tutorial][aks-tutorial] + +[azure-cli-install]: /cli/azure/install-azure-cli +[azure-powershell-install]: /powershell/azure/install-az-ps +[aks-events]: ../event-grid/event-schema-aks.md +[aks-tutorial]: ./tutorial-kubernetes-prepare-app.md +[az-aks-create]: /cli/azure/aks#az_aks_create +[new-azakscluster]: /powershell/module/az.aks/new-azakscluster +[az-eventhubs-namespace-create]: /cli/azure/eventhubs/namespace#az-eventhubs-namespace-create +[new-azeventhubnamespace]: /powershell/module/az.eventhub/new-azeventhubnamespace +[az-eventhubs-eventhub-create]: /cli/azure/eventhubs/eventhub#az-eventhubs-eventhub-create +[new-azeventhub]: /powershell/module/az.eventhub/new-azeventhub +[az-eventgrid-event-subscription-create]: /cli/azure/eventgrid/event-subscription#az-eventgrid-event-subscription-create +[new-azeventgridsubscription]: /powershell/module/az.eventgrid/new-azeventgridsubscription +[az-group-delete]: /cli/azure/group#az_group_delete +[sp-delete]: kubernetes-service-principal.md#other-considerations +[remove-azresourcegroup]: /powershell/module/az.resources/remove-azresourcegroup \ No newline at end of file diff --git a/scenarios/demos/createVMCommentVars.md b/scenarios/demos/createVMCommentVars.md new file mode 100644 index 00000000..016edbfa --- /dev/null +++ b/scenarios/demos/createVMCommentVars.md @@ -0,0 +1,150 @@ + + + +The following example uses a comment block to set environment variables which are used throughout the file as one of the acceptable patterns for an executable document. + + +# Quickstart: Create a Linux virtual machine with the Azure CLI + +**Applies to:** :heavy_check_mark: Linux VMs + +This quickstart shows you how to use the Azure CLI to deploy a Linux virtual machine (VM) in Azure. The Azure CLI is used to create and manage Azure resources via either the command line or scripts. + +In this tutorial, we will be installing the latest Debian image. To show the VM in action, you'll connect to it using SSH and install the NGINX web server. + +If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. + +## Launch Azure Cloud Shell + +The Azure Cloud Shell is a free interactive shell that you can use to run the steps in this article. It has common Azure tools preinstalled and configured to use with your account. + +To open the Cloud Shell, just select **Try it** from the upper right corner of a code block. 
You can also open Cloud Shell in a separate browser tab by going to [https://shell.azure.com/bash](https://shell.azure.com/bash). Select **Copy** to copy the blocks of code, paste it into the Cloud Shell, and select **Enter** to run it. + +If you prefer to install and use the CLI locally, this quickstart requires Azure CLI version 2.0.30 or later. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI]( /cli/azure/install-azure-cli). + + + +## Create a resource group + +Create a resource group with the [az group create](/cli/azure/group) command. An Azure resource group is a logical container into which Azure resources are deployed and managed. The following example creates a resource group named *myResourceGroup* in the *eastus* location: + +```bash +az group create --name $MY_RESOURCE_GROUP_NAME --location $MY_LOCATION +``` + +## Create virtual machine + +Create a VM with the [az vm create](/cli/azure/vm) command. + +The following example creates a VM named *myVM* and adds a user account named *azureuser*. The `--generate-ssh-keys` parameter is used to automatically generate an SSH key, and put it in the default key location (*~/.ssh*). To use a specific set of keys instead, use the `--ssh-key-values` option. + +```bash +az vm create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_VM_NAME \ + --image $MY_VM_IMAGE \ + --admin-username $MY_ADMIN_USERNAME \ + --generate-ssh-keys +``` + +It takes a few minutes to create the VM and supporting resources. The following example output shows the VM create operation was successful. + +```Output +{ + "fqdns": "", + "id": "/subscriptions//resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM", + "location": "eastus", + "macAddress": "00-0D-3A-23-9A-49", + "powerState": "VM running", + "privateIpAddress": "10.0.0.4", + "publicIpAddress": "40.68.254.142", + "resourceGroup": "myResourceGroup" +} +``` + +Make a note of the `publicIpAddress` to use later. + +## Install web server + +To see your VM in action, install the NGINX web server. Update your package sources and then install the latest NGINX package. + +```bash +az vm run-command invoke \ + -g $MY_RESOURCE_GROUP_NAME \ + -n $MY_VM_NAME \ + --command-id RunShellScript \ + --scripts "sudo apt-get update && sudo apt-get install -y nginx" +``` + +## Open port 80 for web traffic + +By default, only SSH connections are opened when you create a Linux VM in Azure. Use [az vm open-port](/cli/azure/vm) to open TCP port 80 for use with the NGINX web server: + +```bash +az vm open-port --port 80 --resource-group $MY_RESOURCE_GROUP_NAME --name $MY_VM_NAME +``` + +## View the web server in action + +Use a web browser of your choice to view the default NGINX welcome page. Use the public IP address of your VM as the web address. The following example shows the default NGINX web site: + +![Screenshot showing the N G I N X default web page.](./media/quick-create-cli/nginix-welcome-page-debian.png) + +Or Run the following command to see the NGINX welcome page in terminal + +```bash + curl $(az vm show -d -g $MY_RESOURCE_GROUP_NAME -n $MY_VM_NAME --query "publicIps" -o tsv) +``` + + +```HTML + + + +Welcome to nginx! + + + +

<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>

+ + +``` + +## Clean up resources + +When no longer needed, you can use the [az group delete](/cli/azure/group) command to remove the resource group, VM, and all related resources. + +```bash +az group delete --name $MY_RESOURCE_GROUP_NAME --no-wait --yes --verbose +``` + +## Next steps + +In this quickstart, you deployed a simple virtual machine, opened a network port for web traffic, and installed a basic web server. To learn more about Azure virtual machines, continue to the tutorial for Linux VMs. + + +> [!div class="nextstepaction"] +> [Azure Linux virtual machine tutorials](./tutorial-manage-vm.md) diff --git a/scenarios/demos/createVMEnvVars.ini b/scenarios/demos/createVMEnvVars.ini new file mode 100644 index 00000000..ad6cd15a --- /dev/null +++ b/scenarios/demos/createVMEnvVars.ini @@ -0,0 +1,5 @@ +MY_RESOURCE_GROUP_NAME = myResourceGroup +MY_LOCATION = eastus +MY_VM_NAME = myVM +MY_VM_IMAGE = debian +MY_ADMIN_USERNAME = azureuser \ No newline at end of file diff --git a/scenarios/demos/createVMEnvVars.md b/scenarios/demos/createVMEnvVars.md new file mode 100644 index 00000000..876a554d --- /dev/null +++ b/scenarios/demos/createVMEnvVars.md @@ -0,0 +1,140 @@ + +The following example uses a .ini file which is named azureVmCreateEnvVariables.ini to set environment variables which are used throughout the file as one of the acceptable patterns for an executable document. + +# Quickstart: Create a Linux virtual machine with the Azure CLI + +**Applies to:** :heavy_check_mark: Linux VMs + +This quickstart shows you how to use the Azure CLI to deploy a Linux virtual machine (VM) in Azure. The Azure CLI is used to create and manage Azure resources via either the command line or scripts. + +In this tutorial, we will be installing the latest Debian image. To show the VM in action, you'll connect to it using SSH and install the NGINX web server. + +If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. + +## Launch Azure Cloud Shell + +The Azure Cloud Shell is a free interactive shell that you can use to run the steps in this article. It has common Azure tools preinstalled and configured to use with your account. + +To open the Cloud Shell, just select **Try it** from the upper right corner of a code block. You can also open Cloud Shell in a separate browser tab by going to [https://shell.azure.com/bash](https://shell.azure.com/bash). Select **Copy** to copy the blocks of code, paste it into the Cloud Shell, and select **Enter** to run it. + +If you prefer to install and use the CLI locally, this quickstart requires Azure CLI version 2.0.30 or later. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI]( /cli/azure/install-azure-cli). + + + +## Create a resource group + +Create a resource group with the [az group create](/cli/azure/group) command. An Azure resource group is a logical container into which Azure resources are deployed and managed. The following example creates a resource group named *myResourceGroup* in the *eastus* location: + +```bash +az group create --name $MY_RESOURCE_GROUP_NAME --location $MY_LOCATION +``` + +## Create virtual machine + +Create a VM with the [az vm create](/cli/azure/vm) command. + +The following example creates a VM named *myVM* and adds a user account named *azureuser*. The `--generate-ssh-keys` parameter is used to automatically generate an SSH key, and put it in the default key location (*~/.ssh*). 
To use a specific set of keys instead, use the `--ssh-key-values` option. + +```bash +az vm create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_VM_NAME \ + --image $MY_VM_IMAGE \ + --admin-username $MY_ADMIN_USERNAME \ + --public-ip-sku Standard \ + --generate-ssh-keys +``` + +It takes a few minutes to create the VM and supporting resources. The following example output shows the VM create operation was successful. + +```json +{ + "fqdns": "", + "id": "/subscriptions//resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM", + "location": "eastus", + "macAddress": "00-0D-3A-23-9A-49", + "powerState": "VM running", + "privateIpAddress": "10.0.0.4", + "publicIpAddress": "40.68.254.142", + "resourceGroup": "myResourceGroup" +} +``` + +Make a note of the `publicIpAddress` to use later. + +## Install web server + +To see your VM in action, install the NGINX web server. Update your package sources and then install the latest NGINX package. + +```bash +az vm run-command invoke \ + -g $MY_RESOURCE_GROUP_NAME \ + -n $MY_VM_NAME \ + --command-id RunShellScript \ + --scripts "sudo apt-get update && sudo apt-get install -y nginx" +``` + +## Open port 80 for web traffic + +By default, only SSH connections are opened when you create a Linux VM in Azure. Use [az vm open-port](/cli/azure/vm) to open TCP port 80 for use with the NGINX web server: + +```bash +az vm open-port --port 80 --resource-group $MY_RESOURCE_GROUP_NAME --name $MY_VM_NAME +``` + +## View the web server in action + +Use a web browser of your choice to view the default NGINX welcome page. Use the public IP address of your VM as the web address. The following example shows the default NGINX web site: + +![Screenshot showing the N G I N X default web page.](./media/quick-create-cli/nginix-welcome-page-debian.png) + +Or Run the following command to see the NGINX welcome page in terminal + +```bash + curl $(az vm show -d -g myResourceGroup -n myVM --query "publicIps" -o tsv) +``` + + +```HTML + + + +Welcome to nginx! + + + +

<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>

+ + +``` + +## Clean up resources + +When no longer needed, you can use the [az group delete](/cli/azure/group) command to remove the resource group, VM, and all related resources. + +```bash +az group delete --name $MY_RESOURCE_GROUP_NAME --no-wait --yes --verbose +``` + +## Next steps + +In this quickstart, you deployed a simple virtual machine, opened a network port for web traffic, and installed a basic web server. To learn more about Azure virtual machines, continue to the tutorial for Linux VMs. + + +> [!div class="nextstepaction"] +> [Azure Linux virtual machine tutorials](./tutorial-manage-vm.md) diff --git a/scenarios/demos/vmssQuickstart.md b/scenarios/demos/vmssQuickstart.md new file mode 100644 index 00000000..c95b7156 --- /dev/null +++ b/scenarios/demos/vmssQuickstart.md @@ -0,0 +1,258 @@ +--- +title: Quickstart - Create a Virtual Machine Scale Set with Azure CLI +description: Get started with your deployments by learning how to quickly create a Virtual Machine Scale Set with Azure CLI. +author: ju-shim +ms.author: jushiman +ms.topic: quickstart +ms.service: virtual-machine-scale-sets +ms.date: 11/22/2022 +ms.reviewer: mimckitt +ms.custom: mimckitt, devx-track-azurecli, mode-api +--- + +# Quickstart: Create a Virtual Machine Scale Set with the Azure CLI + +**Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Windows VMs :heavy_check_mark: Uniform scale sets + +> [!NOTE] +> The following article is for Uniform Virtual Machine Scale Sets. We recommend using Flexible Virtual Machine Scale Sets for new workloads. Learn more about this new orchestration mode in our [Flexible Virtual Machine Scale Sets overview](flexible-virtual-machine-scale-sets.md). + +A Virtual Machine Scale Set allows you to deploy and manage a set of auto-scaling virtual machines. You can scale the number of VMs in the scale set manually, or define rules to autoscale based on resource usage like CPU, memory demand, or network traffic. An Azure load balancer then distributes traffic to the VM instances in the scale set. In this quickstart, you create a Virtual Machine Scale Set and deploy a sample application with the Azure CLI. + +[!INCLUDE [quickstarts-free-trial-note](../../includes/quickstarts-free-trial-note.md)] + +[!INCLUDE [azure-cli-prepare-your-environment.md](~/articles/reusable-content/azure-cli/azure-cli-prepare-your-environment.md)] + +- This article requires version 2.0.29 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. + + +## Define Environment Variables + +Throughout this document we use environment variables to facilitate cut and paste reuse. +The default values below will enable you to work through this document in most cases. The meaning of each +environment variable will be addressed as they are used in the steps below. + +```azurecli-interactive +export RESOURCE_GROUP_NAME=vmssQuickstartRG +export RESOURCE_LOCATION=eastus +export SCALE_SET_NAME=vmssQuickstart +export BASE_VM_IMAGE=UbuntuLTS +export ADMIN_USERNAME=azureuser +export LOAD_BALANCER_NAME=vmssQuickstartLB +export BACKEND_POOL_NAME=vmssQuickstartPool +export LOAD_BALANCER_RULE_NAME=vmssQuickstartRule +export FRONT_END_IP_NAME=vmssQuickstartLoadBalancerFrontEnd +export CUSTOM_SCRIPT_NAME=vmssQuickstartCustomScript +export SCALE_SET_PUBLIC_IP=vmssQuickstartPublicIP +``` + +## Create a scale set +Before you can create a scale set, create a resource group with [az group create](/cli/azure/group). 
The following example creates a resource group named *myResourceGroup* in the *eastus* location: + +```azurecli-interactive +az group create --name $RESOURCE_GROUP_NAME --location $RESOURCE_LOCATION +``` + + +```Output +{ + "id": "/subscriptions//resourceGroups/myResourceGroup", + "location": "eastus", + "managedBy": null, + "name": "myResourceGroup", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null, + "type": "Microsoft.Resources/resourceGroups" +} +``` + +Now create a Virtual Machine Scale Set with [az vmss create](/cli/azure/vmss). The following example creates a scale set named *myScaleSet* that is set to automatically update as changes are applied, and generates SSH keys if they do not exist in *~/.ssh/id_rsa*. These SSH keys are used if you need to log in to the VM instances. To use an existing set of SSH keys, instead use the `--ssh-key-value` parameter and specify the location of your keys. + +```azurecli-interactive +az vmss create \ + --resource-group $RESOURCE_GROUP_NAME \ + --name $SCALE_SET_NAME \ + --image $BASE_VM_IMAGE \ + --upgrade-policy-mode automatic \ + --admin-username $ADMIN_USERNAME \ + --generate-ssh-keys +``` + +It takes a few minutes to create and configure all the scale set resources and VMs. + + +## Deploy sample application +To test your scale set, install a basic web application. The Azure Custom Script Extension is used to download and run a script that installs an application on the VM instances. This extension is useful for post deployment configuration, software installation, or any other configuration / management task. For more information, see the [Custom Script Extension overview](../virtual-machines/extensions/custom-script-linux.md). + +Use the Custom Script Extension to install a basic NGINX web server. 
Apply the Custom Script Extension that installs NGINX with [az vmss extension set](/cli/azure/vmss/extension) as follows: + +```azurecli-interactive +az vmss extension set \ + --publisher Microsoft.Azure.Extensions \ + --version 2.0 \ + --name $CUSTOM_SCRIPT_NAME \ + --resource-group $RESOURCE_GROUP_NAME \ + --vmss-name $SCALE_SET_NAME \ + --settings '{"fileUris":["https://raw.githubusercontent.com/Azure-Samples/compute-automation-configurations/master/automate_nginx.sh"],"commandToExecute":"./automate_nginx.sh"}' +``` + +```Output +{ + "vmss": { + "doNotRunExtensionsOnOverprovisionedVMs": false, + "orchestrationMode": "Uniform", + "overprovision": true, + "provisioningState": "Succeeded", + "singlePlacementGroup": true, + "timeCreated": "2023-02-01T22:17:20.1117742+00:00", + "uniqueId": "38328143-69e8-4a9b-9d55-8a404cdb6d8b", + "upgradePolicy": { + "mode": "Automatic", + "rollingUpgradePolicy": { + "maxBatchInstancePercent": 20, + "maxSurge": false, + "maxUnhealthyInstancePercent": 20, + "maxUnhealthyUpgradedInstancePercent": 20, + "pauseTimeBetweenBatches": "PT0S", + "rollbackFailedInstancesOnPolicyBreach": false + } + }, + "virtualMachineProfile": { + "networkProfile": { + "networkInterfaceConfigurations": [ + { + "name": "mysca2132Nic", + "properties": { + "disableTcpStateTracking": false, + "dnsSettings": { + "dnsServers": [] + }, + "enableAcceleratedNetworking": false, + "enableIPForwarding": false, + "ipConfigurations": [ + { + "name": "mysca2132IPConfig", + "properties": { + "loadBalancerBackendAddressPools": [ + { + "id": "/subscriptions/f7a60fca-9977-4899-b907-005a076adbb6/resourceGroups/myResourceGroup/providers/Microsoft.Network/loadBalancers/myScaleSetLB/backendAddressPools/myScaleSetLBBEPool", + "resourceGroup": "myResourceGroup" + } + ], + "loadBalancerInboundNatPools": [ + { + "id": "/subscriptions/f7a60fca-9977-4899-b907-005a076adbb6/resourceGroups/myResourceGroup/providers/Microsoft.Network/loadBalancers/myScaleSetLB/inboundNatPools/myScaleSetLBNatPool", + "resourceGroup": "myResourceGroup" + } + ], + "privateIPAddressVersion": "IPv4", + "subnet": { + "id": "/subscriptions/f7a60fca-9977-4899-b907-005a076adbb6/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myScaleSetVNET/subnets/myScaleSetSubnet", + "resourceGroup": "myResourceGroup" + } + } + } + ], + "primary": true + } + } + ] + }, + "osProfile": { + "adminUsername": "azureuser", + "allowExtensionOperations": true, + "computerNamePrefix": "mysca2132", + "linuxConfiguration": { + "disablePasswordAuthentication": true, + "enableVMAgentPlatformUpdates": false, + "provisionVMAgent": true, + "ssh": { + "publicKeys": [ + { + "keyData": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCvR1+fGFuVMWS2bAY0SgW4E9QzLZ77ETdbCBUVF46eAyL8JWsLynX214hNSK16l4UYZyC3E6jea5qw2rGHPP4eMp7iif50xqd6qGICS428mqc9Gz29J0LFanM7XpHwLnBiJ6hmKvqvHB5tsGKh44MddW0wv+KiiEHIV1ZdSSvBRJ5MMQhqZoUiqlChHourOhaZxvw2dpJhRCvAEKw1s5RoeoLJAdZ6Qr53ERSkJr3BF7uAoNlGx6gatBVkjV+w9CZXN/YN62b1QQiGnk5/BIXNqEIsyxsa84+GbyieRIN/wYjSEV7ASRxSj60qV7RPexvAI+4JGa9UELYMQDrBElgL", + "path": "/home/azureuser/.ssh/authorized_keys" + } + ] + } + }, + "requireGuestProvisionSignal": true, + "secrets": [] + }, + "storageProfile": { + "imageReference": { + "offer": "UbuntuServer", + "publisher": "Canonical", + "sku": "18.04-LTS", + "version": "latest" + }, + "osDisk": { + "caching": "ReadWrite", + "createOption": "FromImage", + "diskSizeGB": 30, + "managedDisk": { + "storageAccountType": "Premium_LRS" + }, + "osType": "Linux" + } + } + } + } +} +``` + +## Allow traffic to 
application +When the scale set was created, an Azure load balancer was automatically deployed. The load balancer distributes traffic to the VM instances in the scale set. To allow traffic to reach the sample web application, create a load balancer rule with [az network lb rule create](/cli/azure/network/lb/rule). The following example creates a rule named *myLoadBalancerRuleWeb*: + +```azurecli-interactive +az network lb rule create \ + --resource-group $RESOURCE_GROUP_NAME \ + --name $LOAD_BALANCER_RULE_NAME \ + --lb-name $LOAD_BALANCER_NAME \ + --backend-pool-name $BACKEND_POOL_NAME \ + --backend-port 80 \ + --frontend-ip-name $FRONT_END_IP_NAME \ + --frontend-port 80 \ + --protocol tcp +``` + +## Test your scale set +To see your scale set in action, access the sample web application in a web browser. Obtain the public IP address of your load balancer with [az network public-ip show](/cli/azure/network/public-ip). The following example obtains the IP address for *myScaleSetLBPublicIP* created as part of the scale set: + +```azurecli-interactive +az network public-ip show \ + --resource-group $RESOURCE_GROUP_NAME \ + --name $SCALE_SET_PUBLIC_IP \ + --query '[ipAddress]' \ + --output tsv +``` + +Enter the public IP address of the load balancer in to a web browser. The load balancer distributes traffic to one of your VM instances, as shown in the following example: + +![Default web page in NGINX](media/virtual-machine-scale-sets-create-cli/running-nginx-site.png) + +Or run the following command in a local shell to validate the scale set is set up properly + +```bash + curl $(az network public-ip show --resource-group $RESOURCE_GROUP_NAME --name $SCALE_SET_PUBLIC_IP --query '[ipAddress]' --output tsv) +``` + + +```HTML +Hello World from host myscabd00000000 ! +``` + +## Clean up resources +When no longer needed, you can use [az group delete](/cli/azure/group) to remove the resource group, scale set, and all related resources as follows. The `--no-wait` parameter returns control to the prompt without waiting for the operation to complete. The `--yes` parameter confirms that you wish to delete the resources without an additional prompt to do so. + +```azurecli-interactive +az group delete --name $RESOURCE_GROUP_NAME --yes --no-wait +``` + + +## Next steps +In this quickstart, you created a basic scale set and used the Custom Script Extension to install a basic NGINX web server on the VM instances. To learn more, continue to the tutorial for how to create and manage Azure Virtual Machine Scale Sets. + +> [!div class="nextstepaction"] +> [Create and manage Azure Virtual Machine Scale Sets](tutorial-create-and-manage-cli.md) \ No newline at end of file diff --git a/scenarios/ocd/CreateAKSDeployment/README.md b/scenarios/ocd/CreateAKSDeployment/README.md new file mode 100644 index 00000000..abb781b9 --- /dev/null +++ b/scenarios/ocd/CreateAKSDeployment/README.md @@ -0,0 +1,390 @@ +# Quickstart: Deploy a Scalable & Secure Azure Kubernetes Service cluster using the Azure CLI + +Welcome to this tutorial where we will take you step by step in creating an Azure Kubernetes Web Application that is secured via https. This tutorial assumes you are logged into Azure CLI already and have selected a subscription to use with the CLI. It also assumes that you have Helm installed ([Instructions can be found here](https://helm.sh/docs/intro/install/)). + +## Define Environment Variables + +The first step in this tutorial is to define environment variables. 
+ +```bash +export RANDOM_ID="$(openssl rand -hex 3)" +export NETWORK_PREFIX="$(($RANDOM % 254 + 1))" +export SSL_EMAIL_ADDRESS="$(az account show --query user.name --output tsv)" +export MY_RESOURCE_GROUP_NAME="myAKSResourceGroup$RANDOM_ID" +export REGION="eastus" +export MY_AKS_CLUSTER_NAME="myAKSCluster$RANDOM_ID" +export MY_PUBLIC_IP_NAME="myPublicIP$RANDOM_ID" +export MY_DNS_LABEL="mydnslabel$RANDOM_ID" +export MY_VNET_NAME="myVNet$RANDOM_ID" +export MY_VNET_PREFIX="10.$NETWORK_PREFIX.0.0/16" +export MY_SN_NAME="mySN$RANDOM_ID" +export MY_SN_PREFIX="10.$NETWORK_PREFIX.0.0/22" +export FQDN="${MY_DNS_LABEL}.${REGION}.cloudapp.azure.com" +``` + +## Create a resource group + +A resource group is a container for related resources. All resources must be placed in a resource group. We will create one for this tutorial. The following command creates a resource group with the previously defined $MY_RESOURCE_GROUP_NAME and $REGION parameters. + +```bash +az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION +``` + +Results: + + + +```JSON +{ + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAKSResourceGroupxxxxxx", + "location": "eastus", + "managedBy": null, + "name": "testResourceGroup", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null, + "type": "Microsoft.Resources/resourceGroups" +} +``` + +## Create a virtual network and subnet + +A virtual network is the fundamental building block for private networks in Azure. Azure Virtual Network enables Azure resources like VMs to securely communicate with each other and the internet. + +```bash +az network vnet create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --location $REGION \ + --name $MY_VNET_NAME \ + --address-prefix $MY_VNET_PREFIX \ + --subnet-name $MY_SN_NAME \ + --subnet-prefixes $MY_SN_PREFIX +``` + +Results: + + + +```JSON +{ + "newVNet": { + "addressSpace": { + "addressPrefixes": [ + "10.xxx.0.0/16" + ] + }, + "enableDdosProtection": false, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/myAKSResourceGroupxxxxxx/providers/Microsoft.Network/virtualNetworks/myVNetxxx", + "location": "eastus", + "name": "myVNetxxx", + "provisioningState": "Succeeded", + "resourceGroup": "myAKSResourceGroupxxxxxx", + "subnets": [ + { + "addressPrefix": "10.xxx.0.0/22", + "delegations": [], + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/myAKSResourceGroupxxxxxx/providers/Microsoft.Network/virtualNetworks/myVNetxxx/subnets/mySNxxx", + "name": "mySNxxx", + "privateEndpointNetworkPolicies": "Disabled", + "privateLinkServiceNetworkPolicies": "Enabled", + "provisioningState": "Succeeded", + "resourceGroup": "myAKSResourceGroupxxxxxx", + "type": "Microsoft.Network/virtualNetworks/subnets" + } + ], + "type": "Microsoft.Network/virtualNetworks", + "virtualNetworkPeerings": [] + } +} +``` + +## Register to AKS Azure Resource Providers + +Verify Microsoft.OperationsManagement and Microsoft.OperationalInsights providers are registered on your subscription. These are Azure resource providers required to support [Container insights](https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-overview). 
To check the registration status, run the following commands + +```bash +az provider register --namespace Microsoft.Insights +az provider register --namespace Microsoft.OperationsManagement +az provider register --namespace Microsoft.OperationalInsights +``` + +## Create AKS Cluster + +Create an AKS cluster using the az aks create command with the --enable-addons monitoring parameter to enable Container insights. The following example creates an autoscaling, availability zone enabled cluster. + +This will take a few minutes. + +```bash +export MY_SN_ID=$(az network vnet subnet list --resource-group $MY_RESOURCE_GROUP_NAME --vnet-name $MY_VNET_NAME --query "[0].id" --output tsv) +az aks create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_AKS_CLUSTER_NAME \ + --auto-upgrade-channel stable \ + --enable-cluster-autoscaler \ + --enable-addons monitoring \ + --location $REGION \ + --node-count 1 \ + --min-count 1 \ + --max-count 3 \ + --network-plugin azure \ + --network-policy azure \ + --vnet-subnet-id $MY_SN_ID \ + --no-ssh-key \ + --node-vm-size Standard_DS2_v2 \ + --zones 1 2 3 +``` + +## Connect to the cluster + +To manage a Kubernetes cluster, use the Kubernetes command-line client, kubectl. kubectl is already installed if you use Azure Cloud Shell. + +1. Install az aks CLI locally using the az aks install-cli command + + ```bash + if ! [ -x "$(command -v kubectl)" ]; then az aks install-cli; fi + ``` + +2. Configure kubectl to connect to your Kubernetes cluster using the az aks get-credentials command. The following command: + + - Downloads credentials and configures the Kubernetes CLI to use them. + - Uses ~/.kube/config, the default location for the Kubernetes configuration file. Specify a different location for your Kubernetes configuration file using --file argument. + + > [!WARNING] + > This will overwrite any existing credentials with the same entry + + ```bash + az aks get-credentials --resource-group $MY_RESOURCE_GROUP_NAME --name $MY_AKS_CLUSTER_NAME --overwrite-existing + ``` + +3. Verify the connection to your cluster using the kubectl get command. This command returns a list of the cluster nodes. + + ```bash + kubectl get nodes + ``` + +## Install NGINX Ingress Controller + +```bash +export MY_STATIC_IP=$(az network public-ip create --resource-group MC_${MY_RESOURCE_GROUP_NAME}_${MY_AKS_CLUSTER_NAME}_${REGION} --location ${REGION} --name ${MY_PUBLIC_IP_NAME} --dns-name ${MY_DNS_LABEL} --sku Standard --allocation-method static --version IPv4 --zone 1 2 3 --query publicIp.ipAddress -o tsv) +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx \ + --namespace ingress-nginx \ + --create-namespace \ + --set controller.service.annotations."service\.beta\.kubernetes\.io/azure-dns-label-name"=$MY_DNS_LABEL \ + --set controller.service.loadBalancerIP=$MY_STATIC_IP \ + --set controller.service.annotations."service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path"=/healthz \ + --wait +``` + +## Deploy the Application + +A Kubernetes manifest file defines a cluster's desired state, such as which container images to run. + +In this quickstart, you will use a manifest to create all objects needed to run the Azure Vote application. This manifest includes two Kubernetes deployments: + +- The sample Azure Vote Python applications. +- A Redis instance. + +Two Kubernetes Services are also created: + +- An internal service for the Redis instance. 
+- An external service to access the Azure Vote application from the internet. + +Finally, an Ingress resource is created to route traffic to the Azure Vote application. + +A test voting app YML file is already prepared. To deploy this app run the following command + +```bash +kubectl apply -f azure-vote-start.yml +``` + +## Test The Application + +Validate that the application is running by either visiting the public ip or the application url. The application url can be found by running the following command: + +> [!Note] +> It often takes 2-3 minutes for the PODs to be created and the site to be reachable via HTTP + +```bash +runtime="5 minute"; +endtime=$(date -ud "$runtime" +%s); +while [[ $(date -u +%s) -le $endtime ]]; do + STATUS=$(kubectl get pods -l app=azure-vote-front -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}'); echo $STATUS; + if [ "$STATUS" == 'True' ]; then + break; + else + sleep 10; + fi; +done +``` + +```bash +curl "http://$FQDN" +``` + +Results: + + + +```HTML + + + + + Azure Voting App + + + + + +
Cats - 0 | Dogs - 0
+ + +``` + +## Add HTTPS termination to custom domain + +At this point in the tutorial you have an AKS web app with NGINX as the Ingress controller and a custom domain you can use to access your application. The next step is to add an SSL certificate to the domain so that users can reach your application securely via HTTPS. + +## Set Up Cert Manager + +In order to add HTTPS we are going to use Cert Manager. Cert Manager is an open source tool used to obtain and manage SSL certificate for Kubernetes deployments. Cert Manager will obtain certificates from a variety of Issuers, both popular public Issuers as well as private Issuers, and ensure the certificates are valid and up-to-date, and will attempt to renew certificates at a configured time before expiry. + +1. In order to install cert-manager, we must first create a namespace to run it in. This tutorial will install cert-manager into the cert-manager namespace. It is possible to run cert-manager in a different namespace, although you will need to make modifications to the deployment manifests. + + ```bash + kubectl create namespace cert-manager + ``` + +2. We can now install cert-manager. All resources are included in a single YAML manifest file. This can be installed by running the following: + + ```bash + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.7.0/cert-manager.crds.yaml + ``` + +3. Add the certmanager.k8s.io/disable-validation: "true" label to the cert-manager namespace by running the following. This will allow the system resources that cert-manager requires to bootstrap TLS to be created in its own namespace. + + ```bash + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + ``` + +## Obtain certificate via Helm Charts + +Helm is a Kubernetes deployment tool for automating creation, packaging, configuration, and deployment of applications and services to Kubernetes clusters. + +Cert-manager provides Helm charts as a first-class method of installation on Kubernetes. + +1. Add the Jetstack Helm repository + + This repository is the only supported source of cert-manager charts. There are some other mirrors and copies across the internet, but those are entirely unofficial and could present a security risk. + + ```bash + helm repo add jetstack https://charts.jetstack.io + ``` + +2. Update local Helm Chart repository cache + + ```bash + helm repo update + ``` + +3. Install Cert-Manager addon via helm by running the following: + + ```bash + helm install cert-manager jetstack/cert-manager --namespace cert-manager --version v1.7.0 + ``` + +4. Apply Certificate Issuer YAML File + + ClusterIssuers are Kubernetes resources that represent certificate authorities (CAs) that are able to generate signed certificates by honoring certificate signing requests. All cert-manager certificates require a referenced issuer that is in a ready condition to attempt to honor the request. + The issuer we are using can be found in the `cluster-issuer-prod.yml file` + + ```bash + cluster_issuer_variables=$( + + +## Browse your AKS Deployment Secured via HTTPS + +Run the following command to get the HTTPS endpoint for your application: + +> [!Note] +> It often takes 2-3 minutes for the SSL certificate to propogate and the site to be reachable via HTTPS. 
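While you wait, you can optionally watch the certificate being issued. The following is a minimal sketch that assumes cert-manager's ingress-shim creates a `Certificate` resource named after the `secretName` referenced by the Ingress (`azure-vote-nginx-secret`) in the `default` namespace; adjust the name and namespace if your setup differs.

```bash
# Optional: the READY column turns True once the Let's Encrypt certificate has been issued.
# Assumes the Certificate resource is named after the Ingress secretName (azure-vote-nginx-secret).
kubectl get certificate azure-vote-nginx-secret --namespace default
```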
+ +```bash +runtime="5 minute"; +endtime=$(date -ud "$runtime" +%s); +while [[ $(date -u +%s) -le $endtime ]]; do + STATUS=$(kubectl get svc --namespace=ingress-nginx ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}'); + echo $STATUS; + if [ "$STATUS" == "$MY_STATIC_IP" ]; then + break; + else + sleep 10; + fi; +done +``` + +```bash +echo "You can now visit your web server at https://$FQDN" +``` + +## Next Steps + +- [Azure Kubernetes Service Documentation](https://learn.microsoft.com/en-us/azure/aks/) +- [Create an Azure Container Registry](https://learn.microsoft.com/en-us/azure/aks/tutorial-kubernetes-prepare-acr?tabs=azure-cli) +- [Scale your Applciation in AKS](https://learn.microsoft.com/en-us/azure/aks/tutorial-kubernetes-scale?tabs=azure-cli) +- [Update your application in AKS](https://learn.microsoft.com/en-us/azure/aks/tutorial-kubernetes-app-update?tabs=azure-cli) diff --git a/scenarios/ocd/CreateAKSDeployment/azure-vote-nginx-ssl.yml b/scenarios/ocd/CreateAKSDeployment/azure-vote-nginx-ssl.yml new file mode 100644 index 00000000..070e2014 --- /dev/null +++ b/scenarios/ocd/CreateAKSDeployment/azure-vote-nginx-ssl.yml @@ -0,0 +1,28 @@ +--- +# INGRESS WITH SSL PROD +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: vote-ingress + namespace: default + annotations: + kubernetes.io/tls-acme: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "true" + cert-manager.io/cluster-issuer: letsencrypt-prod +spec: + ingressClassName: nginx + tls: + - hosts: + - $FQDN + secretName: azure-vote-nginx-secret + rules: + - host: $FQDN + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: azure-vote-front + port: + number: 80 diff --git a/scenarios/ocd/CreateAKSDeployment/azure-vote-start.yml b/scenarios/ocd/CreateAKSDeployment/azure-vote-start.yml new file mode 100644 index 00000000..492a394e --- /dev/null +++ b/scenarios/ocd/CreateAKSDeployment/azure-vote-start.yml @@ -0,0 +1,106 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: azure-vote-back + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-back + template: + metadata: + labels: + app: azure-vote-back + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: azure-vote-back + image: docker.io/bitnami/redis:6.0.8 + env: + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 6379 + name: redis +--- +apiVersion: v1 +kind: Service +metadata: + name: azure-vote-back + namespace: default +spec: + ports: + - port: 6379 + selector: + app: azure-vote-back +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: azure-vote-front + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-front + template: + metadata: + labels: + app: azure-vote-front + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: azure-vote-front + image: mcr.microsoft.com/azuredocs/azure-vote-front:v1 + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 80 + env: + - name: REDIS + value: "azure-vote-back" +--- +apiVersion: v1 +kind: Service +metadata: + name: azure-vote-front + namespace: default +spec: + ports: + - port: 80 + selector: + app: azure-vote-front +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: vote-ingress + namespace: default +spec: + ingressClassName: nginx + rules: + - http: + 
paths: + - path: / + pathType: Prefix + backend: + service: + name: azure-vote-front + port: + number: 80 \ No newline at end of file diff --git a/scenarios/ocd/CreateAKSDeployment/cluster-issuer-prod.yml b/scenarios/ocd/CreateAKSDeployment/cluster-issuer-prod.yml new file mode 100644 index 00000000..e4a37bab --- /dev/null +++ b/scenarios/ocd/CreateAKSDeployment/cluster-issuer-prod.yml @@ -0,0 +1,38 @@ +#!/bin/bash +#kubectl apply -f - <, which is also referenced in step 1 of the video walkthrough. The updated sample is at: . + +## Next Steps + +* [Azure Container Apps Documentation](https://learn.microsoft.com/en-us/azure/container-apps/) +* [Scaling an Azure Container App](https://learn.microsoft.com/en-us/azure/container-apps/scale-app?pivots=azure-cli) +* [Manage Secrets in Container Apps](https://learn.microsoft.com/en-us/azure/container-apps/manage-secrets?tabs=azure-cli) +* [Health Probes in Azure Container Apps](https://learn.microsoft.com/en-us/azure/container-apps/health-probes?tabs=arm-cli) \ No newline at end of file diff --git a/scenarios/ocd/CreateContainerAppDeploymentFromSource/README.md b/scenarios/ocd/CreateContainerAppDeploymentFromSource/README.md new file mode 100644 index 00000000..7c524515 --- /dev/null +++ b/scenarios/ocd/CreateContainerAppDeploymentFromSource/README.md @@ -0,0 +1,635 @@ +# Create a Container App leveraging Blob Store, SQL, and Computer Vision + +In this guide, we'll be walking through deploying the necessary resources for a web app that allows users to cast votes using their name, email and an image. Users can vote for their preference of cat or dog, using an image of a cat or a dog that will be analyzed by our infrastructure. For this to work, we will be deploying resources across several different Azure services: + +- **Azure Storage Account** to store the images +- **Azure Database for PostgreSQL** to store users and votes +- **Azure Computer Vision** to analyze the images for cats or dogs +- **Azure Container App** to deploy our code + +Note: If you've never created a Computer Vision resource before, you will not be able to create one using the Azure CLI. You must create your first Computer Vision resource from the Azure portal to review and acknowledge the Responsible AI terms and conditions. You can do so here: [Create a Computer Vision Resource](https://portal.azure.com/#create/Microsoft.CognitiveServicesComputerVision). After that, you can create subsequent resources using any deployment tool (SDK, CLI, or ARM template, etc) under the same Azure subscription. + +## Define Environment Variables + +The first step in this tutorial is to define environment variables. **Replace the values on the right with your own unique values.** These values will be used throughout the tutorial to create resources and configure the application. Use lowercase and no special characters for the storage account name. + +```bash +export SUFFIX=$(cat /dev/urandom | LC_ALL=C tr -dc 'a-z0-9' | fold -w 8 | head -n 1) +export MY_RESOURCE_GROUP_NAME=rg$SUFFIX +export REGION=westus +export MY_STORAGE_ACCOUNT_NAME=storage$SUFFIX +export MY_DATABASE_SERVER_NAME=dbserver$SUFFIX +export MY_DATABASE_NAME=db$SUFFIX +export MY_DATABASE_USERNAME=dbuser$SUFFIX +export MY_DATABASE_PASSWORD=dbpass$SUFFIX +export MY_COMPUTER_VISION_NAME=computervision$SUFFIX +export MY_CONTAINER_APP_NAME=containerapp$SUFFIX +export MY_CONTAINER_APP_ENV_NAME=containerappenv$SUFFIX +``` + +## Clone the sample repository + +First, we're going to clone this repository onto our local machines. 
This will provide the starter code required to provide the functionality for the simple application outlined above. We can clone with a simple git command. + +```bash +git clone https://github.com/Azure/computer-vision-nextjs-webapp.git +``` + +To preserve saved environment variables, it's important that this terminal window stays open for the duration of the deployment. + +## Login to Azure using the CLI + +In order to run commands against Azure using [the CLI ](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli)you need to login. This is done though the `az login` command: + +## Create a resource group + +A resource group is a container for related resources. All resources must be placed in a resource group. We will create one for this tutorial. The following command creates a resource group with the previously defined $MY_RESOURCE_GROUP_NAME and $REGION parameters. + +```bash +az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION +``` + +Results: + + +```json +{ + "id": "/subscriptions/ab9d8365-2f65-47a4-8df4-7e40db70c8d2/resourceGroups/$MY_RESOURCE_GROUP_NAME", + "location": "$REGION", + "managedBy": null, + "name": "$MY_RESOURCE_GROUP_NAME", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null, + "type": "Microsoft.Resources/resourceGroups" +} +``` + +## Create the storage account + +To create a storage account in this resource group we need to run a simple command. To this command, we are passing the name of the storage account, the resource group to deploy it in, the physical region to deploy it in, and the SKU of the storage account. All values are configured using environment variables. + +```bash +az storage account create --name $MY_STORAGE_ACCOUNT_NAME --resource-group $MY_RESOURCE_GROUP_NAME --location $REGION --sku Standard_LRS +``` + +Results: + + +```json +{ + "accessTier": "Hot", + "allowBlobPublicAccess": false, + "allowCrossTenantReplication": null, + "allowSharedKeyAccess": null, + "allowedCopyScope": null, + "azureFilesIdentityBasedAuthentication": null, + "blobRestoreStatus": null, + "creationTime": "2023-08-10T14:37:41.276351+00:00", + "customDomain": null, + "defaultToOAuthAuthentication": null, + "dnsEndpointType": null, + "enableHttpsTrafficOnly": true, + "enableNfsV3": null, + "encryption": { + "encryptionIdentity": null, + "keySource": "Microsoft.Storage", + "keyVaultProperties": null, + "requireInfrastructureEncryption": null, + "services": { + "blob": { + "enabled": true, + "keyType": "Account", + "lastEnabledTime": "2023-08-10T14:37:41.370163+00:00" + }, + "file": { + "enabled": true, + "keyType": "Account", + "lastEnabledTime": "2023-08-10T14:37:41.370163+00:00" + }, + "queue": null, + "table": null + } + }, + "extendedLocation": null, + "failoverInProgress": null, + "geoReplicationStats": null, + "id": "/subscriptions/ab9d8365-2f65-47a4-8df4-7e40db70c8d2/resourceGroups/$MY_RESOURCE_GROUP_NAME/providers/Microsoft.Storage/storageAccounts/$MY_STORAGE_ACCOUNT_NAME", + "identity": null, + "immutableStorageWithVersioning": null, + "isHnsEnabled": null, + "isLocalUserEnabled": null, + "isSftpEnabled": null, + "keyCreationTime": { + "key1": "2023-08-10T14:37:41.370163+00:00", + "key2": "2023-08-10T14:37:41.370163+00:00" + }, + "keyPolicy": null, + "kind": "StorageV2", + "largeFileSharesState": null, + "lastGeoFailoverTime": null, + "location": "$REGION", + "minimumTlsVersion": "TLS1_0", + "name": "$MY_STORAGE_ACCOUNT_NAME", + "networkRuleSet": { + "bypass": "AzureServices", + "defaultAction": "Allow", + "ipRules": [], + 
"resourceAccessRules": null, + "virtualNetworkRules": [] + }, + "primaryEndpoints": { + "blob": "https://$MY_STORAGE_ACCOUNT_NAME.blob.core.windows.net/", + "dfs": "https://$MY_STORAGE_ACCOUNT_NAME.dfs.core.windows.net/", + "file": "https://$MY_STORAGE_ACCOUNT_NAME.file.core.windows.net/", + "internetEndpoints": null, + "microsoftEndpoints": null, + "queue": "https://$MY_STORAGE_ACCOUNT_NAME.queue.core.windows.net/", + "table": "https://$MY_STORAGE_ACCOUNT_NAME.table.core.windows.net/", + "web": "https://$MY_STORAGE_ACCOUNT_NAME.z22.web.core.windows.net/" + }, + "primaryLocation": "$REGION", + "privateEndpointConnections": [], + "provisioningState": "Succeeded", + "publicNetworkAccess": null, + "resourceGroup": "$MY_RESOURCE_GROUP_NAME", + "routingPreference": null, + "sasPolicy": null, + "secondaryEndpoints": null, + "secondaryLocation": null, + "sku": { + "name": "Standard_LRS", + "tier": "Standard" + }, + "statusOfPrimary": "available", + "statusOfSecondary": null, + "storageAccountSkuConversionStatus": null, + "tags": {}, + "type": "Microsoft.Storage/storageAccounts" +} +``` + +We also need to store one of the API keys for the storage account into an environment variable for later use (to create a container, and put it into an environment file for the code). We are calling the `keys list` command on the storage account and storing the first one in a `STORAGE_ACCOUNT_KEY` environment variable. + +```bash +export STORAGE_ACCOUNT_KEY=$(az storage account keys list --account-name $MY_STORAGE_ACCOUNT_NAME --resource-group $MY_RESOURCE_GROUP_NAME --query "[0].value" --output tsv) +``` + +## Create a container in the storage account + +Run the following command to create an `images` container in the storage account we just created. User uploaded images will be stored as blobs in this container. + +```bash +az storage container create --name images --account-name $MY_STORAGE_ACCOUNT_NAME --account-key $STORAGE_ACCOUNT_KEY --public-access blob +``` + +Results: + + +```json +{ + "created": true +} +``` + +## Create a database + +We will be creating an Azure Database for PostgreSQL flexible server for the application to store users and their votes. We are passing several arguments to the `create` command: + +- The basics: database name, resource group, and physical region to deploy in. +- The tier (which determines the capabilities of the server) as `burstable`, which is for workloads that don't need full CPU continuously. +- The SKU as `Standard_B1ms`. + - `Standard` for the performance tier. + - `B` for burstable workload. + - `1` for a single vCore. + - `ms` for memory optimized. 
+- The storage size, 32 GiB +- The PostgreSQL major version, 15 +- The datatabase credentials: username and password + +```bash +az postgres flexible-server create \ + --name $MY_DATABASE_SERVER_NAME \ + --database-name $MY_DATABASE_NAME \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --location $REGION \ + --tier Burstable \ + --sku-name Standard_B1ms \ + --storage-size 32 \ + --version 15 \ + --admin-user $MY_DATABASE_USERNAME \ + --admin-password $MY_DATABASE_PASSWORD \ + --yes +``` + +Results: + + +```json +{ + "connectionString": "postgresql://$MY_DATABASE_USERNAME:$MY_DATABASE_PASSWORD@$MY_DATABASE_NAME.postgres.database.azure.com/flexibleserverdb?sslmode=require", + "databaseName": "$MY_DATABASE_NAME", + "firewallName": "FirewallIPAddress_2023-8-10_10-53-21", + "host": "$MY_DATABASE_NAME.postgres.database.azure.com", + "id": "/subscriptions/ab9d8365-2f65-47a4-8df4-7e40db70c8d2/resourceGroups/$MY_RESOURCE_GROUP_NAME/providers/Microsoft.DBforPostgreSQL/flexibleServers/$MY_DATABASE_NAME", + "location": "$REGION", + "password": "$MY_DATABASE_PASSWORD", + "resourceGroup": "$MY_RESOURCE_GROUP_NAME", + "skuname": "Standard_B1ms", + "username": "$MY_DATABASE_USERNAME", + "version": "15" +} +``` + +We also need to store the connection string to the database into an environment variable for later use. This URL will allow us to access the database within the resource we just created. + +```bash +export DATABASE_URL="postgres://$MY_DATABASE_USERNAME:$MY_DATABASE_PASSWORD@$MY_DATABASE_SERVER_NAME.postgres.database.azure.com/$MY_DATABASE_NAME" +``` + +## Create a Computer Vision resource + +We will be creating a Computer Vision resource to be able to identify cats or dogs in the pictures users upload. Creating a Computer Vision resource can be done with a single command. We are passing several arguments to the `create` command: + +- The basics: resource name, resource group, the region, and to create a Computer Vision resource. +- The SKU as `S1`, or the most cost-effective paid performance tier. 
+ +```bash +az cognitiveservices account create \ + --name $MY_COMPUTER_VISION_NAME \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --location $REGION \ + --kind ComputerVision \ + --sku S1 \ + --yes +``` + +Results: + + +```json +{ + "etag": "\"090ac83c-0000-0700-0000-64d4fcd80000\"", + "id": "/subscriptions/ab9d8365-2f65-47a4-8df4-7e40db70c8d2/resourceGroups/$MY_RESOURCE_GROUP_NAME/providers/Microsoft.CognitiveServices/accounts/$MY_COMPUTER_VISION_NAME", + "identity": null, + "kind": "ComputerVision", + "location": "$REGION", + "name": "$MY_COMPUTER_VISION_NAME", + "properties": { + "allowedFqdnList": null, + "apiProperties": null, + "callRateLimit": { + "count": null, + "renewalPeriod": null, + "rules": [ + { + "count": 30.0, + "dynamicThrottlingEnabled": true, + "key": "vision.recognizeText", + "matchPatterns": [ + { + "method": "POST", + "path": "vision/recognizeText" + }, + { + "method": "GET", + "path": "vision/textOperations/*" + }, + { + "method": "*", + "path": "vision/read/*" + } + ], + "minCount": null, + "renewalPeriod": 1.0 + }, + { + "count": 15.0, + "dynamicThrottlingEnabled": true, + "key": "vision", + "matchPatterns": [ + { + "method": "*", + "path": "vision/*" + } + ], + "minCount": null, + "renewalPeriod": 1.0 + }, + { + "count": 500.0, + "dynamicThrottlingEnabled": null, + "key": "container.billing", + "matchPatterns": [ + { + "method": "*", + "path": "billing/*" + } + ], + "minCount": null, + "renewalPeriod": 10.0 + }, + { + "count": 20.0, + "dynamicThrottlingEnabled": true, + "key": "default", + "matchPatterns": [ + { + "method": "*", + "path": "*" + } + ], + "minCount": null, + "renewalPeriod": 1.0 + } + ] + }, + "capabilities": [ + { + "name": "DynamicThrottling", + "value": null + }, + { + "name": "VirtualNetworks", + "value": null + }, + { + "name": "Container", + "value": "ComputerVision.VideoAnalytics,ComputerVision.ComputerVisionRead,ComputerVision.ocr,ComputerVision.readfile,ComputerVision.readfiledsd,ComputerVision.recognizetext,ComputerVision.ComputerVision,ComputerVision.ocrlayoutworker,ComputerVision.ocrcontroller,ComputerVision.ocrdispatcher,ComputerVision.ocrbillingprocessor,ComputerVision.ocranalyzer,ComputerVision.ocrpagesplitter,ComputerVision.ocrapi,ComputerVision.ocrengineworker" + } + ], + "customSubDomainName": null, + "dateCreated": "2023-08-10T15:06:00.4272845Z", + "deletionDate": null, + "disableLocalAuth": null, + "dynamicThrottlingEnabled": null, + "encryption": null, + "endpoint": "https://$REGION.api.cognitive.microsoft.com/", + "endpoints": { + "Computer Vision": "https://$REGION.api.cognitive.microsoft.com/", + "Container": "https://$REGION.api.cognitive.microsoft.com/" + }, + "internalId": "93645816f9594fe49a8f4023c0bf34b4", + "isMigrated": false, + "migrationToken": null, + "networkAcls": null, + "privateEndpointConnections": [], + "provisioningState": "Succeeded", + "publicNetworkAccess": "Enabled", + "quotaLimit": null, + "restore": null, + "restrictOutboundNetworkAccess": null, + "scheduledPurgeDate": null, + "skuChangeInfo": null, + "userOwnedStorage": null + }, + "resourceGroup": "$MY_RESOURCE_GROUP_NAME", + "sku": { + "capacity": null, + "family": null, + "name": "S1", + "size": null, + "tier": null + }, + "systemData": { + "createdAt": "2023-08-10T15:06:00.107300+00:00", + "createdBy": "username@domain.com", + "createdByType": "User", + "lastModifiedAt": "2023-08-10T15:06:00.107300+00:00", + "lastModifiedBy": "username@domain.com", + "lastModifiedByType": "User" + }, + "tags": null, + "type": 
"Microsoft.CognitiveServices/accounts" +} +``` + +To access our computer vision resource, we need both the endpoint and the key. With the Azure CLI, we have access to two `az cognitiveservices account` commands: `show` and `keys list`, which give us what we need. + +```bash +export COMPUTER_VISION_ENDPOINT=$(az cognitiveservices account show --name $MY_COMPUTER_VISION_NAME --resource-group $MY_RESOURCE_GROUP_NAME --query "properties.endpoint" --output tsv) +export COMPUTER_VISION_KEY=$(az cognitiveservices account keys list --name $MY_COMPUTER_VISION_NAME --resource-group $MY_RESOURCE_GROUP_NAME --query "key1" --output tsv) +``` + +## Deploy the code into a Container App + +Now that we've got our storage, database, and Computer Vision resources all set up, we are ready to deploy the application code. To do this, we're going to use Azure Container Apps to host a containerized build of our Next.js app. The `Dockerfile` is already created at the root of the repository, so all we need to do is run a single command to deploy the code. Before running this command, we first need to install the containerapp extension for the Azure CLI. + +```bash +az extension add --upgrade -n containerapp +``` + +This command will create an Azure Container Registry resource to host our Docker image, an Azure Container App resource which runs the image, and an Azure Container App Environment resource for our image. Let's break down what we're passing into the command. + +- The basics: resource name, resource group, and the region +- The name of the Azure Container App Environment resource to use or create +- The path to the source code + +```bash +az containerapp up \ + --name $MY_CONTAINER_APP_NAME \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --location $REGION \ + --environment $MY_CONTAINER_APP_ENV_NAME \ + --context-path computer-vision-nextjs-webapp \ + --source computer-vision-nextjs-webapp \ + --target-port 3000 \ + --ingress external \ + --env-vars \ + AZURE_DATABASE_URL=$DATABASE_URL \ + AZURE_COMPUTER_VISION_KEY=$COMPUTER_VISION_KEY \ + AZURE_COMPUTER_VISION_ENDPOINT=$COMPUTER_VISION_ENDPOINT \ + AZURE_STORAGE_ACCOUNT_NAME=$MY_STORAGE_ACCOUNT_NAME \ + AZURE_STORAGE_ACCOUNT_KEY=$STORAGE_ACCOUNT_KEY +``` + +We can verify that the command was successful by using: + +```bash +az containerapp show --name $MY_CONTAINER_APP_NAME --resource-group $MY_RESOURCE_GROUP_NAME +``` + +Results: + + +```json +{ + "id": "/subscriptions/fake3265-2f64-47a4-8df4-7e41ab70c8dh/resourceGroups/$MY_RESOURCE_GROUP_NAME/providers/Microsoft.App/containerapps/$MY_CONTAINER_APP_NAME", + "identity": { + "type": "None" + }, + "location": "West US", + "name": "$MY_CONTAINER_APP_NAME", + "properties": { + "configuration": { + "activeRevisionsMode": "Single", + "dapr": null, + "ingress": { + "allowInsecure": false, + "clientCertificateMode": null, + "corsPolicy": null, + "customDomains": null, + "exposedPort": 0, + "external": true, + "fqdn": "$MY_CONTAINER_APP_NAME.kindocean-a506af76.$REGION.azurecontainerapps.io", + "ipSecurityRestrictions": null, + "stickySessions": null, + "targetPort": 3000, + "traffic": [ + { + "latestRevision": true, + "weight": 100 + } + ], + "transport": "Auto" + }, + "maxInactiveRevisions": null, + "registries": null, + "secrets": null, + "service": null + }, + "customDomainVerificationId": "06C64CD176439F8B6CCBBE1B531758828A5CACEABFB30B4DC9750641532924F6", + "environmentId": 
"/subscriptions/fake3265-2f64-47a4-8df4-7e41ab70c8dh/resourceGroups/$MY_RESOURCE_GROUP_NAME/providers/Microsoft.App/managedEnvironments/$MY_CONTAINER_APP_ENV_NAME", + "eventStreamEndpoint": "https://$REGION.azurecontainerapps.dev/subscriptions/eb9d8265-2f64-47a4-8df4-7e41db70c8d8/resourceGroups/$MY_RESOURCE_GROUP_NAME/containerApps/$MY_CONTAINER_APP_NAME/eventstream", + "latestReadyRevisionName": "$MY_CONTAINER_APP_NAME--jl6fh75", + "latestRevisionFqdn": "$MY_CONTAINER_APP_NAME--jl6fh75.kindocean-a506af76.$REGION.azurecontainerapps.io", + "latestRevisionName": "$MY_CONTAINER_APP_NAME--jl6fh75", + "managedEnvironmentId": "/subscriptions/eb9d8265-2f64-47a4-8df4-7e41db70c8d8/resourceGroups/$MY_RESOURCE_GROUP_NAME/providers/Microsoft.App/managedEnvironments/$MY_CONTAINER_APP_ENV_NAME", + "outboundIpAddresses": ["20.237.221.47"], + "provisioningState": "Succeeded", + "runningStatus": "Running", + "template": { + "containers": [ + { + "env": [ + { + "name": "AZURE_DATABASE_URL", + "value": "$DATABASE_URL" + }, + { + "name": "AZURE_COMPUTER_VISION_KEY", + "value": "$COMPUTER_VISION_KEY" + }, + { + "name": "AZURE_COMPUTER_VISION_ENDPOINT", + "value": "$COMPUTER_VISION_ENDPOINT" + }, + { + "name": "AZURE_STORAGE_ACCOUNT_NAME", + "value": "$MY_STORAGE_ACCOUNT_NAME" + }, + { + "name": "AZURE_STORAGE_ACCOUNT_KEY", + "value": "$STORAGE_ACCOUNT_KEY" + } + ], + "image": "ralphr123/cn-app", + "name": "$MY_CONTAINER_APP_NAME", + "resources": { + "cpu": 0.5, + "ephemeralStorage": "2Gi", + "memory": "1Gi" + } + } + ], + "initContainers": null, + "revisionSuffix": "", + "scale": { + "maxReplicas": 10, + "minReplicas": null, + "rules": null + }, + "serviceBinds": null, + "terminationGracePeriodSeconds": null, + "volumes": null + }, + "workloadProfileName": null + }, + "resourceGroup": "$MY_RESOURCE_GROUP_NAME", + "systemData": { + "createdAt": "2023-08-10T21:50:07.2125698", + "createdBy": "username@domain.com", + "createdByType": "User", + "lastModifiedAt": "2023-08-10T21:50:07.2125698", + "lastModifiedBy": "username@domain.com", + "lastModifiedByType": "User" + }, + "type": "Microsoft.App/containerApps" +} +``` + +## Create a database firewall rule + +By default, our database is configured to allow traffic from an allowlist of IP addresses. We need to add the IP of our newly deployed Container App to this allowlist. We can get the IP from the `az containerapp show` command. 
+ +```bash +export CONTAINER_APP_IP=$(az containerapp show --name $MY_CONTAINER_APP_NAME --resource-group $MY_RESOURCE_GROUP_NAME --query "properties.outboundIpAddresses[0]" --output tsv) +``` + +We can now add this IP as a firewall rule with this command: + +```bash +az postgres flexible-server firewall-rule create \ + --name $MY_DATABASE_SERVER_NAME \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --rule-name allow-container-app \ + --start-ip-address $CONTAINER_APP_IP \ + --end-ip-address $CONTAINER_APP_IP +``` + +Results: + + +```json +{ + "endIpAddress": "20.237.221.47", + "id": "/subscriptions/ab9d8365-2f65-47a4-8df4-7e40db70c8d2/resourceGroups/$MY_RESOURCE_GROUP_NAME/providers/Microsoft.DBforPostgreSQL/flexibleServers/$MY_DATABASE_SERVER_NAME/firewallRules/allow-container-app", + "name": "allow-container-app", + "resourceGroup": "$MY_RESOURCE_GROUP_NAME", + "startIpAddress": "20.237.221.47", + "systemData": null, + "type": "Microsoft.DBforPostgreSQL/flexibleServers/firewallRules" +} +``` + +## Create a storage CORS rule + +Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain. CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. We need to add a CORS rule on the URL of our web app to our storage account. First, let's get the URL with a similar `az containerapp show` command as earlier. + +```bash +export CONTAINER_APP_URL=https://$(az containerapp show --name $MY_CONTAINER_APP_NAME --resource-group $MY_RESOURCE_GROUP_NAME --query "properties.configuration.ingress.fqdn" --output tsv) +``` + +Next, we're ready to add a CORS rule with the following command. Let's break down the different parts of this command. + +- We are specifying blob service as the storage type to add the rule to. +- We are allowing all operations to be performed. +- We are allowing only the container app URL we just saved. +- We are allowing all HTTP headers from this URL. +- Max age is the amount of time, in seconds, that a browser should cache the preflight response for a specific request. +- We are passing the storage account name and key from earlier. + +```bash +az storage cors add \ + --services b \ + --methods DELETE GET HEAD MERGE OPTIONS POST PUT PATCH \ + --origins $CONTAINER_APP_URL \ + --allowed-headers '*' \ + --max-age 3600 \ + --account-name $MY_STORAGE_ACCOUNT_NAME \ + --account-key $STORAGE_ACCOUNT_KEY +``` + +That's it! Feel free to access the newly deployed web app in your browser printing the CONTAINER_APP_URL environment variable we added earlier. + +```bash +echo $CONTAINER_APP_URL +``` + +## Next Steps + +- [Azure Container Apps documentation](https://learn.microsoft.com/en-us/azure/container-apps/) +- [Azure Database for PostgreSQL documentation](https://learn.microsoft.com/en-us/azure/postgresql/) +- [Azure Blob Storage documentation](https://learn.microsoft.com/en-us/azure/storage/blobs/) +- [Azure Computer (AI) Vision Documentation](https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/) diff --git a/scenarios/ocd/CreateLinuxVMAndSSH/README.md b/scenarios/ocd/CreateLinuxVMAndSSH/README.md new file mode 100644 index 00000000..20fc414b --- /dev/null +++ b/scenarios/ocd/CreateLinuxVMAndSSH/README.md @@ -0,0 +1,120 @@ +# Create a Linux VM and SSH On Azure + +## Define Environment Variables + +The First step in this tutorial is to define environment variables. 
+
+```bash
+export RANDOM_ID="$(openssl rand -hex 3)"
+export MY_RESOURCE_GROUP_NAME="myVMResourceGroup$RANDOM_ID"
+export REGION=EastUS
+export MY_VM_NAME="myVM$RANDOM_ID"
+export MY_USERNAME=azureuser
+export MY_VM_IMAGE="Canonical:0001-com-ubuntu-minimal-jammy:minimal-22_04-lts-gen2:latest"
+```
+
+# Login to Azure using the CLI
+
+In order to run commands against Azure using the CLI you need to log in. This is done through the `az login` command.
+
+# Create a resource group
+
+A resource group is a container for related resources. All resources must be placed in a resource group. We will create one for this tutorial. The following command creates a resource group with the previously defined $MY_RESOURCE_GROUP_NAME and $REGION parameters.
+
+```bash
+az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION
+```
+
+Results:
+
+```json
+{
+  "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myVMResourceGroup",
+  "location": "eastus",
+  "managedBy": null,
+  "name": "myVMResourceGroup",
+  "properties": {
+    "provisioningState": "Succeeded"
+  },
+  "tags": null,
+  "type": "Microsoft.Resources/resourceGroups"
+}
+```
+
+## Create the Virtual Machine
+
+To create a VM in this resource group, we need to run just one command. Here we provide the `--generate-ssh-keys` flag, which causes the CLI to look for an available SSH key in `~/.ssh`; if one is found it is used, otherwise a new key is generated and stored in `~/.ssh`. We also provide the `--public-ip-sku Standard` flag to ensure that the machine is accessible via a public IP. Finally, we are deploying the latest `Ubuntu 22.04` image.
+
+All other values are configured using environment variables.
+
+```bash
+az vm create \
+    --resource-group $MY_RESOURCE_GROUP_NAME \
+    --name $MY_VM_NAME \
+    --image $MY_VM_IMAGE \
+    --admin-username $MY_USERNAME \
+    --assign-identity \
+    --generate-ssh-keys \
+    --public-ip-sku Standard
+```
+
+Results:
+
+```json
+{
+  "fqdns": "",
+  "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myVMResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM",
+  "location": "eastus",
+  "macAddress": "00-0D-3A-10-4F-70",
+  "powerState": "VM running",
+  "privateIpAddress": "10.0.0.4",
+  "publicIpAddress": "52.147.208.85",
+  "resourceGroup": "myVMResourceGroup",
+  "zones": ""
+}
+```
+
+### Enable Azure AD login for a Linux Virtual Machine in Azure
+
+The following example installs the extension that enables Azure AD login for the Linux VM you just deployed. VM extensions are small applications that provide post-deployment configuration and automation tasks on Azure virtual machines.
+ +```bash +az vm extension set \ + --publisher Microsoft.Azure.ActiveDirectory \ + --name AADSSHLoginForLinux \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --vm-name $MY_VM_NAME +``` + +# Store IP Address of VM in order to SSH +run the following command to get the IP Address of the VM and store it as an environment variable + +```bash +export IP_ADDRESS=$(az vm show --show-details --resource-group $MY_RESOURCE_GROUP_NAME --name $MY_VM_NAME --query publicIps --output tsv) +``` + +# SSH Into VM + + + + + +You can now SSH into the VM by running the output of the following command in your ssh client of choice + +```bash +ssh -o StrictHostKeyChecking=no $MY_USERNAME@$IP_ADDRESS +``` + +# Next Steps + +* [VM Documentation](https://learn.microsoft.com/en-us/azure/virtual-machines/) +* [Use Cloud-Init to initialize a Linux VM on first boot](https://learn.microsoft.com/en-us/azure/virtual-machines/linux/tutorial-automate-vm-deployment) +* [Create custom VM images](https://learn.microsoft.com/en-us/azure/virtual-machines/linux/tutorial-custom-images) +* [Load Balance VMs](https://learn.microsoft.com/en-us/azure/load-balancer/quickstart-load-balancer-standard-public-cli) diff --git a/scenarios/ocd/CreateLinuxVMLAMP/README.md b/scenarios/ocd/CreateLinuxVMLAMP/README.md new file mode 100644 index 00000000..956e05f5 --- /dev/null +++ b/scenarios/ocd/CreateLinuxVMLAMP/README.md @@ -0,0 +1,819 @@ +# Install a LEMP stack on Azure + +This article walks you through how to deploy an NGINX web server, Azure MySQL Flexible Server, and PHP (the LEMP stack) on an Ubuntu Linux VM in Azure. To see the LEMP server in action, you can optionally install and configure a WordPress site. In this tutorial you learn how to: + +> [!div class="checklist"] + +> * Create a Linux Ubuntu VM +> * Open ports 80 and 443 for web traffic +> * Install and Secure NGINX, Azure Flexible MySQL Server, and PHP +> * Verify installation and configuration +> * Install WordPress + +## Variable declaration + +First we will define a few variables that will help with the configuration of the LEMP workload. + +```bash +export NETWORK_PREFIX="$(($RANDOM % 254 + 1))" +export RANDOM_ID="$(openssl rand -hex 3)" +export MY_RESOURCE_GROUP_NAME="myLEMPResourceGroup$RANDOM_ID" +export REGION="eastus" +export MY_VM_NAME="myVM$RANDOM_ID" +export MY_VM_USERNAME="azureadmin" +export MY_VM_SIZE='Standard_DS2_v2' +export MY_VM_IMAGE='Canonical:0001-com-ubuntu-minimal-jammy:minimal-22_04-lts-gen2:latest' +export MY_PUBLIC_IP_NAME="myPublicIP$RANDOM_ID" +export MY_DNS_LABEL="mydnslabel$RANDOM_ID" +export MY_NSG_NAME="myNSG$RANDOM_ID" +export MY_NSG_SSH_RULE="Allow-Access$RANDOM_ID" +export MY_VM_NIC_NAME="myVMNic$RANDOM_ID" +export MY_VNET_NAME="myVNet$RANDOM_ID" +export MY_VNET_PREFIX="10.$NETWORK_PREFIX.0.0/22" +export MY_SN_NAME="mySN$RANDOM_ID" +export MY_SN_PREFIX="10.$NETWORK_PREFIX.0.0/24" +export MY_MYSQL_DB_NAME="mydb$RANDOM_ID" +export MY_MYSQL_ADMIN_USERNAME="dbadmin$RANDOM_ID" +export MY_MYSQL_ADMIN_PW="$(openssl rand -base64 32)" +export MY_MYSQL_SN_NAME="myMySQLSN$RANDOM_ID" +export MY_WP_ADMIN_PW="$(openssl rand -base64 32)" +export MY_WP_ADMIN_USER="wpcliadmin" +export MY_AZURE_USER=$(az account show --query user.name --output tsv) +export FQDN="${MY_DNS_LABEL}.${REGION}.cloudapp.azure.com" +``` + + + +## Create RG + +Create a resource group with the [az group create](https://learn.microsoft.com/cli/azure/group#az-group-create) command. An Azure resource group is a logical container into which Azure resources are deployed and managed. 
+The following example creates a resource group named `$MY_RESOURCE_GROUP_NAME` in the `eastus` location. + +```bash +az group create \ + --name $MY_RESOURCE_GROUP_NAME \ + --location $REGION -o JSON +``` + +Results: + + +```JSON +{ + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx", + "location": "eastus", + "managedBy": null, + "name": "myLEMPResourceGroupxxxxxx", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null, + "type": "Microsoft.Resources/resourceGroups" +} +``` + +## Setup LEMP networking + +## Create an Azure Virtual Network + +A virtual network is the fundamental building block for private networks in Azure. Azure Virtual Network enables Azure resources like VMs to securely communicate with each other and the internet. +Use [az network vnet create](https://learn.microsoft.com/cli/azure/network/vnet#az-network-vnet-create) to create a virtual network named `$MY_VNET_NAME` with a subnet named `$MY_SN_NAME` in the `$MY_RESOURCE_GROUP_NAME` resource group. + +```bash +az network vnet create \ + --name $MY_VNET_NAME \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --location $REGION \ + --address-prefix $MY_VNET_PREFIX \ + --subnet-name $MY_SN_NAME \ + --subnet-prefixes $MY_SN_PREFIX -o JSON +``` + +Results: + + +```JSON +{ + "newVNet": { + "addressSpace": { + "addressPrefixes": [ + "10.19.0.0/22" + ] + }, + "enableDdosProtection": false, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx/providers/Microsoft.Network/virtualNetworks/myVNetxxxxxx", + "location": "eastus", + "name": "myVNetxxxxxx", + "provisioningState": "Succeeded", + "resourceGroup": "myLEMPResourceGroupxxxxxx", + "subnets": [ + { + "addressPrefix": "10.19.0.0/24", + "delegations": [], + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx/providers/Microsoft.Network/virtualNetworks/myVNetxxxxxx/subnets/mySNxxxxxx", + "name": "mySNxxxxxx", + "privateEndpointNetworkPolicies": "Disabled", + "privateLinkServiceNetworkPolicies": "Enabled", + "provisioningState": "Succeeded", + "resourceGroup": "myLEMPResourceGroupxxxxxx", + "type": "Microsoft.Network/virtualNetworks/subnets" + } + ], + "type": "Microsoft.Network/virtualNetworks", + "virtualNetworkPeerings": [] + } +} +``` + +## Create an Azure Public IP + +Use [az network public-ip create](https://learn.microsoft.com/cli/azure/network/public-ip#az-network-public-ip-create) to create a standard zone-redundant public IPv4 address named `MY_PUBLIC_IP_NAME` in `$MY_RESOURCE_GROUP_NAME`. + +>[!NOTE] +>The below options for zones are only valid selections in regions with [Availability Zones](https://learn.microsoft.com/azure/reliability/availability-zones-service-support). 
+ +```bash +az network public-ip create \ + --name $MY_PUBLIC_IP_NAME \ + --location $REGION \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --dns-name $MY_DNS_LABEL \ + --sku Standard \ + --allocation-method static \ + --version IPv4 \ + --zone 1 2 3 -o JSON +``` + +Results: + + +```JSON +{ + "publicIp": { + "ddosSettings": { + "protectionMode": "VirtualNetworkInherited" + }, + "dnsSettings": { + "domainNameLabel": "mydnslabelxxxxxx", + "fqdn": "mydnslabelxxxxxx.eastus.cloudapp.azure.com" + }, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx/providers/Microsoft.Network/publicIPAddresses/myPublicIPxxxxxx", + "idleTimeoutInMinutes": 4, + "ipTags": [], + "location": "eastus", + "name": "myPublicIPxxxxxx", + "provisioningState": "Succeeded", + "publicIPAddressVersion": "IPv4", + "publicIPAllocationMethod": "Static", + "resourceGroup": "myLEMPResourceGroupxxxxxx", + "sku": { + "name": "Standard", + "tier": "Regional" + }, + "type": "Microsoft.Network/publicIPAddresses", + "zones": [ + "1", + "2", + "3" + ] + } +} +``` + +## Create an Azure Network Security Group + +Security rules in network security groups enable you to filter the type of network traffic that can flow in and out of virtual network subnets and network interfaces. To learn more about network security groups, see [Network security group overview](https://learn.microsoft.com/azure/virtual-network/network-security-groups-overview). + +```bash +az network nsg create \ + --name $MY_NSG_NAME \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --location $REGION -o JSON +``` + +Results: + + +```JSON +{ + "NewNSG": { + "defaultSecurityRules": + { + "access": "Allow", + "description": "Allow inbound traffic from all VMs in VNET", + "destinationAddressPrefix": "VirtualNetwork", + "destinationAddressPrefixes": [], + "destinationPortRange": "*", + "destinationPortRanges": [], + "direction": "Inbound", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroup104/providers/Microsoft.Network/networkSecurityGroups/protect-vms/defaultSecurityRules/AllowVnetInBound", + "name": "AllowVnetInBound", + "priority": 65000, + "protocol": "*", + "provisioningState": "Succeeded", + "resourceGroup": "myLEMPResourceGroup104", + "sourceAddressPrefix": "VirtualNetwork", + "sourceAddressPrefixes": [], + "sourcePortRange": "*", + "sourcePortRanges": [], + "type": "Microsoft.Network/networkSecurityGroups/defaultSecurityRules" + }, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroup104/providers/Microsoft.Network/networkSecurityGroups/protect-vms", + "location": "eastus", + "name": "protect-vms", + "provisioningState": "Succeeded", + "resourceGroup": "myLEMPResourceGroup104", + "securityRules": [], + "type": "Microsoft.Network/networkSecurityGroups" + } +} +``` + +## Create Azure Network Security Group rules + +You'll create a rule to allow connections to the virtual machine on port 22 for SSH and ports 80, 443 for HTTP and HTTPS. An extra rule is created to allow all ports for outbound connections. Use [az network nsg rule create](https://learn.microsoft.com/cli/azure/network/nsg/rule#az-network-nsg-rule-create) to create a network security group rule. 
+ +```bash +az network nsg rule create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --nsg-name $MY_NSG_NAME \ + --name $MY_NSG_SSH_RULE \ + --access Allow \ + --protocol Tcp \ + --direction Inbound \ + --priority 100 \ + --source-address-prefix '*' \ + --source-port-range '*' \ + --destination-address-prefix '*' \ + --destination-port-range 22 80 443 -o JSON +``` + +Results: + + +```JSON +{ + "access": "Allow", + "destinationAddressPrefix": "*", + "destinationAddressPrefixes": [], + "destinationPortRanges": [ + "22", + "80", + "443" + ], + "direction": "Inbound", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx/providers/Microsoft.Network/networkSecurityGroups/myNSGNamexxxxxx/securityRules/Allow-Accessxxxxxx", + "name": "Allow-Accessxxxxxx", + "priority": 100, + "protocol": "Tcp", + "provisioningState": "Succeeded", + "resourceGroup": "myLEMPResourceGroupxxxxxx", + "sourceAddressPrefix": "*", + "sourceAddressPrefixes": [], + "sourcePortRange": "*", + "sourcePortRanges": [], + "type": "Microsoft.Network/networkSecurityGroups/securityRules" +} +``` + +## Create an Azure Network Interface + +You'll use [az network nic create](https://learn.microsoft.com/cli/azure/network/nic#az-network-nic-create) to create the network interface for the virtual machine. The public IP addresses and the NSG created previously are associated with the NIC. The network interface is attached to the virtual network you created previously. + +```bash +az network nic create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_VM_NIC_NAME \ + --location $REGION \ + --ip-forwarding false \ + --subnet $MY_SN_NAME \ + --vnet-name $MY_VNET_NAME \ + --network-security-group $MY_NSG_NAME \ + --public-ip-address $MY_PUBLIC_IP_NAME -o JSON +``` + +Results: + + +```JSON +{ + "NewNIC": { + "auxiliaryMode": "None", + "auxiliarySku": "None", + "disableTcpStateTracking": false, + "dnsSettings": { + "appliedDnsServers": [], + "dnsServers": [] + }, + "enableAcceleratedNetworking": false, + "enableIPForwarding": false, + "hostedWorkloads": [], + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx/providers/Microsoft.Network/networkInterfaces/myVMNicNamexxxxxx", + "ipConfigurations": [ + { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx/providers/Microsoft.Network/networkInterfaces/myVMNicNamexxxxxx/ipConfigurations/ipconfig1", + "name": "ipconfig1", + "primary": true, + "privateIPAddress": "10.19.0.4", + "privateIPAddressVersion": "IPv4", + "privateIPAllocationMethod": "Dynamic", + "provisioningState": "Succeeded", + "resourceGroup": "myLEMPResourceGroupxxxxxx", + "subnet": { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx/providers/Microsoft.Network/virtualNetworks/myVNetxxxxxx/subnets/mySNxxxxxx", + "resourceGroup": "myLEMPResourceGroupxxxxxx" + }, + "type": "Microsoft.Network/networkInterfaces/ipConfigurations" + } + ], + "location": "eastus", + "name": "myVMNicNamexxxxxx", + "networkSecurityGroup": { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx/providers/Microsoft.Network/networkSecurityGroups/myNSGNamexxxxxx", + "resourceGroup": "myLEMPResourceGroupxxxxxx" + }, + "nicType": "Standard", + "provisioningState": "Succeeded", + "resourceGroup": "myLEMPResourceGroupxxxxxx", + "tapConfigurations": [], + "type": "Microsoft.Network/networkInterfaces", + 
"vnetEncryptionSupported": false + } +} +``` + +## Cloud-init overview + +Cloud-init is a widely used approach to customize a Linux VM as it boots for the first time. You can use cloud-init to install packages and write files, or to configure users and security. As cloud-init runs during the initial boot process, there are no additional steps or required agents to apply your configuration. + +Cloud-init also works across distributions. For example, you don't use apt-get install or yum install to install a package. Instead you can define a list of packages to install. Cloud-init automatically uses the native package management tool for the distro you select. + +We are working with our partners to get cloud-init included and working in the images that they provide to Azure. For detailed information cloud-init support for each distribution, see [Cloud-init support for VMs in Azure](https://learn.microsoft.com/azure/virtual-machines/linux/using-cloud-init). + +### Create cloud-init config file + +To see cloud-init in action, create a VM that installs a LEMP stack and runs a simple Wordpress app secured with an SSL certificate. The following cloud-init configuration installs the required packages, creates the Wordpress website, then initialize and starts the website. + +```bash +cat << EOF > cloud-init.txt +#cloud-config + +# Install, update, and upgrade packages +package_upgrade: true +package_update: true +package_reboot_if_require: true + +# Install packages +packages: + - vim + - certbot + - python3-certbot-nginx + - bash-completion + - nginx + - mysql-client + - php + - php-cli + - php-bcmath + - php-curl + - php-imagick + - php-intl + - php-json + - php-mbstring + - php-mysql + - php-gd + - php-xml + - php-xmlrpc + - php-zip + - php-fpm + +write_files: + - owner: www-data:www-data + path: /etc/nginx/sites-available/default.conf + content: | + server { + listen 80 default_server; + listen [::]:80 default_server; + root /var/www/html; + server_name $FQDN; + } + +write_files: + - owner: www-data:www-data + path: /etc/nginx/sites-available/$FQDN.conf + content: | + upstream php { + server unix:/run/php/php8.1-fpm.sock; + } + server { + listen 443 ssl http2; + listen [::]:443 ssl http2; + + server_name $FQDN; + + ssl_certificate /etc/letsencrypt/live/$FQDN/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/$FQDN/privkey.pem; + + root /var/www/$FQDN; + index index.php; + + location / { + try_files \$uri \$uri/ /index.php?\$args; + } + location ~ \.php$ { + include fastcgi_params; + fastcgi_intercept_errors on; + fastcgi_pass php; + fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name; + } + location ~* \.(js|css|png|jpg|jpeg|gif|ico)$ { + expires max; + log_not_found off; + } + location = /favicon.ico { + log_not_found off; + access_log off; + } + + location = /robots.txt { + allow all; + log_not_found off; + access_log off; + } + } + server { + listen 80; + listen [::]:80; + server_name $FQDN; + return 301 https://$FQDN\$request_uri; + } + +runcmd: + - sed -i 's/;cgi.fix_pathinfo.*/cgi.fix_pathinfo = 1/' /etc/php/8.1/fpm/php.ini + - sed -i 's/^max_execution_time \= .*/max_execution_time \= 300/g' /etc/php/8.1/fpm/php.ini + - sed -i 's/^upload_max_filesize \= .*/upload_max_filesize \= 64M/g' /etc/php/8.1/fpm/php.ini + - sed -i 's/^post_max_size \= .*/post_max_size \= 64M/g' /etc/php/8.1/fpm/php.ini + - systemctl restart php8.1-fpm + - systemctl restart nginx + - certbot --nginx certonly --non-interactive --agree-tos -d $FQDN -m dummy@dummy.com --redirect + - ln -s 
/etc/nginx/sites-available/$FQDN.conf /etc/nginx/sites-enabled/ + - rm /etc/nginx/sites-enabled/default + - systemctl restart nginx + - curl --url https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar --output /tmp/wp-cli.phar + - mv /tmp/wp-cli.phar /usr/local/bin/wp + - chmod +x /usr/local/bin/wp + - wp cli update + - mkdir -m 0755 -p /var/www/$FQDN + - chown -R azureadmin:www-data /var/www/$FQDN + - sudo -u azureadmin -i -- wp core download --path=/var/www/$FQDN + - sudo -u azureadmin -i -- wp config create --dbhost=$MY_MYSQL_DB_NAME.mysql.database.azure.com --dbname=wp001 --dbuser=$MY_MYSQL_ADMIN_USERNAME --dbpass="$MY_MYSQL_ADMIN_PW" --path=/var/www/$FQDN + - sudo -u azureadmin -i -- wp core install --url=$FQDN --title="Azure hosted blog" --admin_user=$MY_WP_ADMIN_USER --admin_password="$MY_WP_ADMIN_PW" --admin_email=$MY_AZURE_USER --path=/var/www/$FQDN + - sudo -u azureadmin -i -- wp plugin update --all --path=/var/www/$FQDN + - chmod 600 /var/www/$FQDN/wp-config.php + - mkdir -p -m 0775 /var/www/$FQDN/wp-content/uploads + - chgrp www-data /var/www/$FQDN/wp-content/uploads +EOF +``` + +## Create an Azure Private DNS Zone for Azure MySQL Flexible Server + +Azure Private DNS Zone integration allows you to resolve the private DNS within the current VNET or any in-region peered VNET where the private DNS Zone is linked. You'll use [az network private-dns zone create](https://learn.microsoft.com/cli/azure/network/private-dns/zone#az-network-private-dns-zone-create) to create the private DNS zone. + +```bash +az network private-dns zone create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_DNS_LABEL.private.mysql.database.azure.com -o JSON +``` + +Results: + + +```JSON +{ + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx/providers/Microsoft.Network/privateDnsZones/mydnslabelxxxxxx.private.mysql.database.azure.com", + "location": "global", + "maxNumberOfRecordSets": 25000, + "maxNumberOfVirtualNetworkLinks": 1000, + "maxNumberOfVirtualNetworkLinksWithRegistration": 100, + "name": "mydnslabelxxxxxx.private.mysql.database.azure.com", + "numberOfRecordSets": 1, + "numberOfVirtualNetworkLinks": 0, + "numberOfVirtualNetworkLinksWithRegistration": 0, + "provisioningState": "Succeeded", + "resourceGroup": "myLEMPResourceGroupxxxxxx", + "tags": null, + "type": "Microsoft.Network/privateDnsZones" +} +``` + +## Create an Azure Database for MySQL - Flexible Server + +Azure Database for MySQL - Flexible Server is a managed service that you can use to run, manage, and scale highly available MySQL servers in the cloud. Create a flexible server with the [az mysql flexible-server create](https://learn.microsoft.com/cli/azure/mysql/flexible-server#az-mysql-flexible-server-create) command. A server can contain multiple databases. 
The following command creates a server using service defaults and variable values from your Azure CLI's local environment:
+
+```bash
+az mysql flexible-server create \
+    --admin-password $MY_MYSQL_ADMIN_PW \
+    --admin-user $MY_MYSQL_ADMIN_USERNAME \
+    --auto-scale-iops Disabled \
+    --high-availability Disabled \
+    --iops 500 \
+    --location $REGION \
+    --name $MY_MYSQL_DB_NAME \
+    --database-name wp001 \
+    --resource-group $MY_RESOURCE_GROUP_NAME \
+    --sku-name Standard_B2s \
+    --storage-auto-grow Disabled \
+    --storage-size 20 \
+    --subnet $MY_MYSQL_SN_NAME \
+    --private-dns-zone $MY_DNS_LABEL.private.mysql.database.azure.com \
+    --tier Burstable \
+    --version 8.0.21 \
+    --vnet $MY_VNET_NAME \
+    --yes -o JSON
+```
+
+Results:
+
+```JSON
+{
+  "databaseName": "wp001",
+  "host": "mydbxxxxxx.mysql.database.azure.com",
+  "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx/providers/Microsoft.DBforMySQL/flexibleServers/mydbxxxxxx",
+  "location": "East US",
+  "resourceGroup": "myLEMPResourceGroupxxxxxx",
+  "skuname": "Standard_B2s",
+  "subnetId": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx/providers/Microsoft.Network/virtualNetworks/myVNetxxxxxx/subnets/myMySQLSNxxxxxx",
+  "username": "dbadminxxxxxx",
+  "version": "8.0.21"
+}
+```
+
+```bash
+echo "Your MySQL user $MY_MYSQL_ADMIN_USERNAME password is: $MY_MYSQL_ADMIN_PW"
+```
+
+The server created has the following attributes:
+
+* The server name, admin username, admin password, resource group name, and location are taken from the variables defined earlier, and the server is created in the same location as the resource group and the other Azure components.
+* Service defaults are used for the remaining server configurations: compute tier (Burstable), compute size/SKU (Standard_B2s), backup retention period (7 days), and MySQL version (8.0.21).
+* The default connectivity method is Private access (VNet Integration) with a linked virtual network and an auto-generated subnet.
+
+> [!NOTE]
+> The connectivity method cannot be changed after creating the server. For example, if you selected `Private access (VNet Integration)` during creation, you cannot change it to `Public access (allowed IP addresses)` afterwards. We highly recommend creating a server with Private access to securely access your server using VNet Integration. Learn more about Private access in the [concepts article](https://learn.microsoft.com/azure/mysql/flexible-server/concepts-networking-vnet).
+
+If you'd like to change any defaults, refer to the Azure CLI [reference documentation](https://learn.microsoft.com/cli/azure/mysql/flexible-server) for the complete list of configurable CLI parameters.
+
+## Check the Azure Database for MySQL - Flexible Server status
+
+It takes a few minutes to create the Azure Database for MySQL - Flexible Server and supporting resources.
+
+```bash
+runtime="10 minute";
+endtime=$(date -ud "$runtime" +%s);
+while [[ $(date -u +%s) -le $endtime ]]; do
+  STATUS=$(az mysql flexible-server show -g $MY_RESOURCE_GROUP_NAME -n $MY_MYSQL_DB_NAME --query state -o tsv);
+  echo $STATUS;
+  if [ "$STATUS" == 'Ready' ]; then
+    break;
+  else
+    sleep 10;
+  fi;
+done
+```
+
+## Configure server parameters in Azure Database for MySQL - Flexible Server
+
+You can manage Azure Database for MySQL - Flexible Server configuration using server parameters. The server parameters are configured with the default and recommended value when you create the server.
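+
+For example, you can use the `az mysql flexible-server parameter show` command, described below, to inspect the current value of a single parameter before changing anything. The following is a minimal sketch rather than part of the original deployment flow; it assumes the `$MY_RESOURCE_GROUP_NAME` and `$MY_MYSQL_DB_NAME` variables defined earlier are still set, and the `--query` projection is only there to trim the output.
+
+```bash
+# Inspect a single server parameter (here: require_secure_transport) before overriding it.
+az mysql flexible-server parameter show \
+    --resource-group $MY_RESOURCE_GROUP_NAME \
+    --server-name $MY_MYSQL_DB_NAME \
+    --name require_secure_transport \
+    --query "{name:name, value:value, default:defaultValue}" -o JSON
+```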
+ +Show server parameter details +To show details about a particular parameter for a server, run the [az mysql flexible-server parameter show](https://learn.microsoft.com/cli/azure/mysql/flexible-server/parameter) command. + +### Disable Azure Database for MySQL - Flexible Server SSL connection parameter for Wordpress integration + +Modify a server parameter value +You can also modify the value of a certain server parameter, which updates the underlying configuration value for the MySQL server engine. To update the server parameter, use the [az mysql flexible-server parameter set](https://learn.microsoft.com/cli/azure/mysql/flexible-server/parameter#az-mysql-flexible-server-parameter-set) command. + +```bash +az mysql flexible-server parameter set \ + -g $MY_RESOURCE_GROUP_NAME \ + -s $MY_MYSQL_DB_NAME \ + -n require_secure_transport -v "OFF" -o JSON +``` + +Results: + + +```JSON +{ + "allowedValues": "ON,OFF", + "currentValue": "OFF", + "dataType": "Enumeration", + "defaultValue": "ON", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx/providers/Microsoft.DBforMySQL/flexibleServers/mydbxxxxxx/configurations/require_secure_transport", + "isConfigPendingRestart": "False", + "isDynamicConfig": "True", + "isReadOnly": "False", + "name": "require_secure_transport", + "resourceGroup": "myLEMPResourceGroupxxxxxx", + "source": "user-override", + "systemData": null, + "type": "Microsoft.DBforMySQL/flexibleServers/configurations", + "value": "OFF" +} +``` + +## Create an Azure Linux Virtual Machine + +The following example creates a VM named `$MY_VM_NAME` and creates SSH keys if they do not already exist in a default key location. The command also sets `$MY_VM_USERNAME` as an administrator user name. +To improve the security of Linux virtual machines in Azure, you can integrate with Azure Active Directory authentication. You can now use Azure AD as a core authentication platform and a certificate authority to SSH into a Linux VM by using Azure AD and OpenSSH certificate-based authentication. This functionality allows organizations to manage access to VMs with Azure role-based access control and Conditional Access policies. + +Create a VM with the [az vm create](https://learn.microsoft.com/cli/azure/vm#az-vm-create) command. 
+ +```bash +az vm create \ + --name $MY_VM_NAME \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --admin-username $MY_VM_USERNAME \ + --authentication-type ssh \ + --assign-identity \ + --image $MY_VM_IMAGE \ + --location $REGION \ + --nic-delete-option Delete \ + --os-disk-caching ReadOnly \ + --os-disk-delete-option Delete \ + --os-disk-size-gb 30 \ + --size $MY_VM_SIZE \ + --generate-ssh-keys \ + --storage-sku Premium_LRS \ + --nics $MY_VM_NIC_NAME \ + --custom-data cloud-init.txt -o JSON +``` + +Results: + + +```JSON +{ + "fqdns": "mydnslabelxxxxxx.eastus.cloudapp.azure.com", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx/providers/Microsoft.Compute/virtualMachines/myVMNamexxxxxx", + "identity": { + "principalId": "yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy", + "tenantId": "zzzzzzzz-zzzz-zzzz-zzzz-zzzzzzzzzzzz", + "type": "SystemAssigned", + "userAssignedIdentities": null + }, + "location": "eastus", + "macAddress": "60-45-BD-D8-1D-84", + "powerState": "VM running", + "privateIpAddress": "10.19.0.4", + "resourceGroup": "myLEMPResourceGroupxxxxxx", + "zones": "" +} +``` + +## Check the Azure Linux Virtual Machine status + +It takes a few minutes to create the VM and supporting resources. The provisioningState value of Succeeded appears when the extension is successfully installed on the VM. The VM must have a running [VM agent](https://learn.microsoft.com/azure/virtual-machines/extensions/agent-linux) to install the extension. + +```bash +runtime="5 minute"; +endtime=$(date -ud "$runtime" +%s); +while [[ $(date -u +%s) -le $endtime ]]; do + STATUS=$(ssh -o StrictHostKeyChecking=no $MY_VM_USERNAME@$FQDN "cloud-init status --wait"); + echo $STATUS; + if [[ "$STATUS" == *'status: done'* ]]; then + break; + else + sleep 10; + fi; +done +``` + + + + + +## Enable Azure AD login for a Linux Virtual Machine in Azure + +The following installs the extension to enable Azure AD login for a Linux VM. VM extensions are small applications that provide post-deployment configuration and automation tasks on Azure virtual machines. + +```bash +az vm extension set \ + --publisher Microsoft.Azure.ActiveDirectory \ + --name AADSSHLoginForLinux \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --vm-name $MY_VM_NAME -o JSON +``` + +Results: + + +```JSON +{ + "autoUpgradeMinorVersion": true, + "enableAutomaticUpgrade": null, + "forceUpdateTag": null, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLEMPResourceGroupxxxxxx/providers/Microsoft.Compute/virtualMachines/myVMNamexxxxxx/extensions/AADSSHLoginForLinux", + "instanceView": null, + "location": "eastus", + "name": "AADSSHLoginForLinux", + "protectedSettings": null, + "protectedSettingsFromKeyVault": null, + "provisioningState": "Succeeded", + "publisher": "Microsoft.Azure.ActiveDirectory", + "resourceGroup": "myLEMPResourceGroupxxxxxx", + "settings": null, + "suppressFailures": null, + "tags": null, + "type": "Microsoft.Compute/virtualMachines/extensions", + "typeHandlerVersion": "1.0", + "typePropertiesType": "AADSSHLoginForLinux" +} +``` + +## Check and browse your WordPress website + +[WordPress](https://www.wordpress.org) is an open source content management system (CMS) used by over 40% of the web to create websites, blogs, and other applications. WordPress can be run on a few different Azure services: [AKS](https://learn.microsoft.com/azure/mysql/flexible-server/tutorial-deploy-wordpress-on-aks), Virtual Machines, and App Service. 
For a full list of WordPress options on Azure, see [WordPress on Azure Marketplace](https://azuremarketplace.microsoft.com/marketplace/apps?page=1&search=wordpress). + +This WordPress setup is only for proof of concept. To install the latest WordPress in production with recommended security settings, see the [WordPress documentation](https://codex.wordpress.org/Main_Page). + +Validate that the application is running by curling the application url: + +```bash +runtime="5 minute"; +endtime=$(date -ud "$runtime" +%s); +while [[ $(date -u +%s) -le $endtime ]]; do + if curl -I -s -f $FQDN > /dev/null ; then + curl -L -s -f $FQDN 2> /dev/null | head -n 9 + break + else + sleep 10 + fi; +done +``` + +Results: + + +```HTML + + + + + + +Azure hosted blog + + +``` + +```bash +echo "You can now visit your web server at https://$FQDN" +``` diff --git a/scenarios/ocd/CreateLinuxVMSecureWebServer/README.md b/scenarios/ocd/CreateLinuxVMSecureWebServer/README.md new file mode 100644 index 00000000..18309e57 --- /dev/null +++ b/scenarios/ocd/CreateLinuxVMSecureWebServer/README.md @@ -0,0 +1,826 @@ +# Intro to Create a NGINX Webserver Secured via HTTPS + +To secure web servers, a Transport Layer Security (TLS), previously known as Secure Sockets Layer (SSL), certificate can be used to encrypt web traffic. These TLS/SSL certificates can be stored in Azure Key Vault, and allow secure deployments of certificates to Linux virtual machines (VMs) in Azure. In this tutorial you learn how to: + +> [!div class="checklist"] + +> * Setup and secure Azure Networking +> * Create an Azure Key Vault +> * Generate or upload a certificate to the Key Vault +> * Create a VM and install the NGINX web server +> * Inject the certificate into the VM and configure NGINX with a TLS binding + +If you choose to install and use the CLI locally, this tutorial requires that you're running the Azure CLI version 2.0.30 or later. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI]( https://learn.microsoft.com//cli/azure/install-azure-cli ). + +## Variable Declaration + +List of all the environment variables you'll need to execute this tutorial: + +```bash +export NETWORK_PREFIX="$(($RANDOM % 254 + 1))" +export RANDOM_ID="$(openssl rand -hex 3)" +export MY_RESOURCE_GROUP_NAME="myResourceGroup$RANDOM_ID" +export MY_KEY_VAULT="mykeyvault$RANDOM_ID" +export MY_CERT_NAME="nginxcert$RANDOM_ID" +export REGION="eastus" +export MY_VM_NAME="myVMName$RANDOM_ID" +export MY_VM_ID_NAME="myVMIDName$RANDOM_ID" +export MY_VM_IMAGE='Ubuntu2204' +export MY_VM_USERNAME="azureuser" +export MY_VM_SIZE='Standard_DS2_v2' +export MY_VNET_NAME="myVNet$RANDOM_ID" +export MY_VM_NIC_NAME="myVMNicName$RANDOM_ID" +export MY_NSG_SSH_RULE="Allow-Access$RANDOM_ID" +export MY_VM_NIC_NAME="myVMNicName$RANDOM_ID" +export MY_VNET_PREFIX="10.$NETWORK_PREFIX.0.0/16" +export MY_SN_NAME="mySN$RANDOM_ID" +export MY_SN_PREFIX="10.$NETWORK_PREFIX.0.0/24" +export MY_PUBLIC_IP_NAME="myPublicIP$RANDOM_ID" +export MY_DNS_LABEL="mydnslabel$RANDOM_ID" +export MY_NSG_NAME="myNSGName$RANDOM_ID" +export FQDN="${MY_DNS_LABEL}.${REGION}.cloudapp.azure.com" +``` + +## Create a Resource Group + +Before you can create a secure Linux VM, create a resource group with az group create. 
The following example creates a resource group equal to the contents of the variable *MY_RESOURCE_GROUP_NAME* in the location specified by the variable contents *REGION*: + +```bash +az group create \ + --name $MY_RESOURCE_GROUP_NAME \ + --location $REGION -o JSON +``` + +Results: + + +```JSON +{ + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroupb1404f", + "location": "eastus", + "managedBy": null, + "name": "myResourceGroupb1404f", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null, + "type": "Microsoft.Resources/resourceGroups" +} +``` + +## Set up VM Network + +Use az network vnet create to create a virtual network named *$MY_VNET_NAME* with a subnet named *$MY_SN_NAME*in the *$MY_RESOURCE_GROUP_NAME*resource group. + +```bash +az network vnet create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_VNET_NAME \ + --location $REGION \ + --address-prefix $MY_VNET_PREFIX \ + --subnet-name $MY_SN_NAME \ + --subnet-prefix $MY_SN_PREFIX -o JSON +``` + +Results: + + +```JSON +{ + "newVNet": { + "addressSpace": { + "addressPrefixes": [ + "10.168.0.0/16" + ] + }, + "bgpCommunities": null, + "ddosProtectionPlan": null, + "dhcpOptions": { + "dnsServers": [] + }, + "enableDdosProtection": false, + "enableVmProtection": null, + "encryption": null, + "extendedLocation": null, + "flowTimeoutInMinutes": null, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroupb1404f/providers/Microsoft.Network/virtualNetworks/myVNetb1404f", + "ipAllocations": null, + "location": "eastus", + "name": "myVNetb1404f", + "provisioningState": "Succeeded", + "resourceGroup": "myResourceGroupb1404f", + "subnets": [ + { + "addressPrefix": "10.168.0.0/24", + "addressPrefixes": null, + "applicationGatewayIpConfigurations": null, + "delegations": [], + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroupb1404f/providers/Microsoft.Network/virtualNetworks/myVNetb1404f/subnets/mySNb1404f", + "ipAllocations": null, + "ipConfigurationProfiles": null, + "ipConfigurations": null, + "name": "mySNb1404f", + "natGateway": null, + "networkSecurityGroup": null, + "privateEndpointNetworkPolicies": "Disabled", + "privateEndpoints": null, + "privateLinkServiceNetworkPolicies": "Enabled", + "provisioningState": "Succeeded", + "purpose": null, + "resourceGroup": "myResourceGroupb1404f", + "resourceNavigationLinks": null, + "routeTable": null, + "serviceAssociationLinks": null, + "serviceEndpointPolicies": null, + "serviceEndpoints": null, + "type": "Microsoft.Network/virtualNetworks/subnets" + } + ], + "tags": {}, + "type": "Microsoft.Network/virtualNetworks", + "virtualNetworkPeerings": [] + } +} +``` + +Use az network public-ip create to create a standard zone-redundant public IPv4 address named *$MY_PUBLIC_IP_NAME* in *$MY_RESOURCE_GROUP_NAME*. 
+ +```bash +az network public-ip create \ + --name $MY_PUBLIC_IP_NAME \ + --location $REGION \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --dns-name $MY_DNS_LABEL \ + --sku Standard \ + --allocation-method static \ + --version IPv4 \ + --zone 1 2 3 -o JSON +``` + +Results: + + +```JSON +{ + "publicIp": { + "ddosSettings": null, + "deleteOption": null, + "dnsSettings": { + "domainNameLabel": "mydnslabelb1404f", + "fqdn": "mydnslabelb1404f.eastus.cloudapp.azure.com", + "reverseFqdn": null + }, + "extendedLocation": null, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroupb1404f/providers/Microsoft.Network/publicIPAddresses/myPublicIPb1404f", + "idleTimeoutInMinutes": 4, + "ipAddress": "20.88.178.210", + "ipConfiguration": null, + "ipTags": [], + "linkedPublicIpAddress": null, + "location": "eastus", + "migrationPhase": null, + "name": "myPublicIPb1404f", + "natGateway": null, + "provisioningState": "Succeeded", + "publicIpAddressVersion": "IPv4", + "publicIpAllocationMethod": "Static", + "publicIpPrefix": null, + "resourceGroup": "myResourceGroupb1404f", + "servicePublicIpAddress": null, + "sku": { + "name": "Standard", + "tier": "Regional" + }, + "tags": null, + "type": "Microsoft.Network/publicIPAddresses", + "zones": [ + "1", + "2", + "3" + ] + } +} +``` + +Security rules in network security groups enable you to filter the type of network traffic that can flow in and out of virtual network subnets and network interfaces. To learn more about network security groups, see [Network security group overview](https://learn.microsoft.com/azure/virtual-network/network-security-groups-overview). + +```bash +az network nsg create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_NSG_NAME \ + --location $REGION -o JSON +``` + +Results: + + +```JSON +{ + "NewNSG": { + "defaultSecurityRules": [ + { + "access": "Allow", + "description": "Allow inbound traffic from all VMs in VNET", + "destinationAddressPrefix": "VirtualNetwork", + "destinationAddressPrefixes": [], + "destinationPortRange": "*", + "destinationPortRanges": [], + "direction": "Inbound", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroupb1404f/providers/Microsoft.Network/networkSecurityGroups/myNSGNameb1404f/defaultSecurityRules/AllowVnetInBound", + "name": "AllowVnetInBound", + "priority": 65000, + "protocol": "*", + "provisioningState": "Succeeded", + "resourceGroup": "myResourceGroupb1404f", + "sourceAddressPrefix": "VirtualNetwork", + "sourceAddressPrefixes": [], + "sourcePortRange": "*", + "sourcePortRanges": [], + "type": "Microsoft.Network/networkSecurityGroups/defaultSecurityRules" + } + ], + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroupb1404f/providers/Microsoft.Network/networkSecurityGroups/myNSGNameb1404f", + "location": "eastus", + "name": "myNSGNameb1404f", + "provisioningState": "Succeeded", + "resourceGroup": "myResourceGroupb1404f", + "securityRules": [], + "type": "Microsoft.Network/networkSecurityGroups" + } +} +``` + +Open ports 22 (SSH), 80 (HTTP) and 443 (HTTPS) to allow SSH and Web traffic + +```bash +az network nsg rule create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --nsg-name $MY_NSG_NAME \ + --name $MY_NSG_SSH_RULE \ + --access Allow \ + --protocol Tcp \ + --direction Inbound \ + --priority 100 \ + --source-address-prefix '*' \ + --source-port-range '*' \ + --destination-address-prefix '*' \ + --destination-port-range 22 80 443 -o JSON +``` + +Results: + + +```JSON +{ + 
"access": "Allow", + "description": null, + "destinationAddressPrefix": "*", + "destinationAddressPrefixes": [], + "destinationApplicationSecurityGroups": null, + "destinationPortRange": null, + "destinationPortRanges": [ + "22", + "80", + "443" + ], + "direction": "Inbound", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroupb1404f/providers/Microsoft.Network/networkSecurityGroups/myNSGNameb1404f/securityRules/MY_NSG_SSH_RULE", + "name": "MY_NSG_SSH_RULE", + "priority": 100, + "protocol": "Tcp", + "provisioningState": "Succeeded", + "resourceGroup": "myResourceGroupb1404f", + "sourceAddressPrefix": "*", + "sourceAddressPrefixes": [], + "sourceApplicationSecurityGroups": null, + "sourcePortRange": "*", + "sourcePortRanges": [], + "type": "Microsoft.Network/networkSecurityGroups/securityRules" +} +``` + +And finally create the Network Interface Card (NIC): + +```bash +az network nic create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_VM_NIC_NAME \ + --location $REGION \ + --ip-forwarding false \ + --subnet $MY_SN_NAME \ + --vnet-name $MY_VNET_NAME \ + --network-security-group $MY_NSG_NAME \ + --public-ip-address $MY_PUBLIC_IP_NAME -o JSON +``` + +Results: + + +```JSON +{ + "NewNIC": { + "auxiliaryMode": "None", + "auxiliarySku": "None", + "disableTcpStateTracking": false, + "dnsSettings": { + "appliedDnsServers": [], + "dnsServers": [] + }, + "enableAcceleratedNetworking": false, + "enableIPForwarding": false, + "hostedWorkloads": [], + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroupb1404f/providers/Microsoft.Network/networkInterfaces/myVMNicNameb1404f", + "ipConfigurations": [ + { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroupb1404f/providers/Microsoft.Network/networkInterfaces/myVMNicNameb1404f/ipConfigurations/ipconfig1", + "name": "ipconfig1", + "primary": true, + "privateIPAddress": "10.168.0.4", + "privateIPAddressVersion": "IPv4", + "privateIPAllocationMethod": "Dynamic", + "provisioningState": "Succeeded", + "resourceGroup": "myResourceGroupb1404f", + "subnet": { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroupb1404f/providers/Microsoft.Network/virtualNetworks/myVNetb1404f/subnets/mySNb1404f", + "resourceGroup": "myResourceGroupb1404f" + }, + "type": "Microsoft.Network/networkInterfaces/ipConfigurations" + } + ], + "location": "eastus", + "name": "myVMNicNameb1404f", + "networkSecurityGroup": { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroupb1404f/providers/Microsoft.Network/networkSecurityGroups/myNSGNameb1404f", + "resourceGroup": "myResourceGroupb1404f" + }, + "nicType": "Standard", + "provisioningState": "Succeeded", + "resourceGroup": "myResourceGroupb1404f", + "tapConfigurations": [], + "type": "Microsoft.Network/networkInterfaces", + "vnetEncryptionSupported": false + } +} +``` + +## Generate a certificate and store it in Azure Key Vault + +Azure Key Vault safeguards cryptographic keys and secrets, such as certificates or passwords. Key Vault helps streamline the certificate management process and enables you to maintain control of keys that access those certificates. You can create a self-signed certificate inside Key Vault, or upload an existing, trusted certificate that you already own. For this tutorial we'll create self-signed certificates inside the Key Vault and afterwards inject these certificates into a running VM. 
This process ensures that the most up-to-date certificates are installed on a web server during deployment. + +The following example creates an Azure Key Vault named *$MY_KEY_VAULT* in the chosen region *$REGION* with a retention policy of 7 days. This means once a secret, key, certificate, or key vault is deleted, it will remain recoverable for a configurable period of 7 to 90 calendar days. + +```bash +az keyvault create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_KEY_VAULT \ + --location $REGION \ + --retention-days 7\ + --enabled-for-deployment true -o JSON +``` + +Results: + + +```JSON +{ + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroupb1404f/providers/Microsoft.KeyVault/vaults/myKeyVaultb1404f", + "location": "eastus", + "name": "myKeyVaultb1404f", + "properties": { + "accessPolicies": [ + { + "applicationId": null, + "permissions": { + "certificates": [ + "all" + ], + "keys": [ + "all" + ], + "secrets": [ + "all" + ], + "storage": [ + "all" + ] + }, + "tenantId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + } + ], + "createMode": null, + "enablePurgeProtection": null, + "enableRbacAuthorization": null, + "enableSoftDelete": true, + "enabledForDeployment": true, + "enabledForDiskEncryption": null, + "enabledForTemplateDeployment": null, + "hsmPoolResourceId": null, + "networkAcls": null, + "privateEndpointConnections": null, + "provisioningState": "Succeeded", + "publicNetworkAccess": "Enabled", + "sku": { + "family": "A", + "name": "standard" + }, + "softDeleteRetentionInDays": 7, + "tenantId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "vaultUri": "https://mykeyvaultb1404f.vault.azure.net/" + }, + "resourceGroup": "myResourceGroupb1404f", + "systemData": { + "createdAt": "2023-09-18T12:25:55.208000+00:00", + "createdBy": "example@microsoft.com", + "createdByType": "User", + "lastModifiedAt": "2023-09-18T12:25:55.208000+00:00", + "lastModifiedBy": "example@microsoft.com", + "lastModifiedByType": "User" + }, + "tags": {}, + "type": "Microsoft.KeyVault/vaults" +} +``` + +## Create a certificate and store in Azure key Vault + +Now let's generate a self-signed certificate with az keyvault certificate create that uses the default certificate policy: + +```bash +az keyvault certificate create \ + --vault-name $MY_KEY_VAULT \ + --name $MY_CERT_NAME \ + --policy "$(az keyvault certificate get-default-policy)" -o JSON +``` + +Results: + + +```JSON +{ + "cancellationRequested": false, + "csr": "MIICr...", + "error": null, + "id": "https://mykeyvault67a7ba.vault.azure.net/certificates/nginxcert67a7ba/pending", + "issuerParameters": { + "certificateTransparency": null, + "certificateType": null, + "name": "Self" + }, + "name": "nginxcert67a7ba", + "status": "completed", + "statusDetails": null, + "target": "https://mykeyvault67a7ba.vault.azure.net/certificates/nginxcert67a7ba" +} +``` + +Finally, we need to prepare the certificate so it can be used during the VM create process. To do so we need to obtain the ID of the certificate with az keyvault secret list-versions, and convert the certificate with az vm secret format. 
The following example assigns the output of these commands to variables for ease of use in the next steps: + +```bash +az identity create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_VM_ID_NAME -o JSON +``` + +Results: + + +```JSON +{ + "clientId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroupb1404f/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myVMIDNameb1404f", + "location": "eastus", + "name": "myVMIDNameb1404f", + "principalId": "e09ebfce-97f0-4aff-9abd-415ebd6f915c", + "resourceGroup": "myResourceGroupb1404f", + "tags": {}, + "tenantId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "type": "Microsoft.ManagedIdentity/userAssignedIdentities" +} +``` + +```bash +MY_VM_PRINCIPALID=$(az identity show --resource-group $MY_RESOURCE_GROUP_NAME --name $MY_VM_ID_NAME --query principalId -o tsv) + +az keyvault set-policy \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_KEY_VAULT \ + --object-id $MY_VM_PRINCIPALID \ + --secret-permissions get list \ + --certificate-permissions get list -o JSON +``` + +Results: + + +```JSON +{ + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroupb1404f/providers/Microsoft.KeyVault/vaults/myKeyVaultb1404f", + "location": "eastus", + "name": "myKeyVaultb1404f", + "properties": { + "accessPolicies": [ + { + "applicationId": null, + "objectId": "ceeb4e98-5831-4d9f-b8ba-2ee14b3cdf80", + "permissions": { + "certificates": [ + "all" + ], + "keys": [ + "all" + ], + "secrets": [ + "all" + ], + "storage": [ + "all" + ] + }, + "tenantId": "bd7153ee-d085-4a28-a928-2f0ef402f076" + }, + { + "applicationId": null, + "objectId": "e09ebfce-97f0-4aff-9abd-415ebd6f915c", + "permissions": { + "certificates": [ + "list", + "get" + ], + "keys": null, + "secrets": [ + "list", + "get" + ], + "storage": null + }, + "tenantId": "bd7153ee-d085-4a28-a928-2f0ef402f076" + } + ], + "createMode": null, + "enablePurgeProtection": null, + "enableRbacAuthorization": null, + "enableSoftDelete": true, + "enabledForDeployment": true, + "enabledForDiskEncryption": null, + "enabledForTemplateDeployment": null, + "hsmPoolResourceId": null, + "networkAcls": null, + "privateEndpointConnections": null, + "provisioningState": "Succeeded", + "publicNetworkAccess": "Enabled", + "sku": { + "family": "A", + "name": "standard" + }, + "softDeleteRetentionInDays": 7, + "tenantId": "bd7153ee-d085-4a28-a928-2f0ef402f076", + "vaultUri": "https://mykeyvaultb1404f.vault.azure.net/" + }, + "resourceGroup": "myResourceGroupb1404f", + "systemData": { + "createdAt": "2023-09-18T12:25:55.208000+00:00", + "createdBy": "ajoian@microsoft.com", + "createdByType": "User", + "lastModifiedAt": "2023-09-18T12:48:08.966000+00:00", + "lastModifiedBy": "ajoian@microsoft.com", + "lastModifiedByType": "User" + }, + "tags": {}, + "type": "Microsoft.KeyVault/vaults" +} +``` + +## Create the VM + +Now create a VM with az vm create. Use the --custom-data parameter to pass in the cloud-init config file, named *cloud-init-nginx.txt*. +Cloud-init is a widely used approach to customize a Linux VM as it boots for the first time. You can use cloud-init to install packages and write files, or to configure users and security. As cloud-init runs during the initial boot process, there are no extra steps or required agents to apply your configuration. +When you create a VM, certificates and keys are stored in the protected /var/lib/waagent/ directory. 
In this example, we are installing and configuring the NGINX web server. + +```bash +cat > cloud-init-nginx.txt </dev/null; echo "0 * * * * /root/convert_akv_cert.sh && service nginx reload") | crontab - + - service nginx restart +EOF +``` + +The following example creates a VM named *myVMName$UNIQUE_POSTFIX*: + +```bash +MY_VM_ID=$(az identity show --resource-group $MY_RESOURCE_GROUP_NAME --name $MY_VM_ID_NAME --query id -o tsv) + +az vm create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_VM_NAME \ + --image $MY_VM_IMAGE \ + --admin-username $MY_VM_USERNAME \ + --generate-ssh-keys \ + --assign-identity $MY_VM_ID \ + --size $MY_VM_SIZE \ + --custom-data cloud-init-nginx.txt \ + --nics $MY_VM_NIC_NAME +``` + +Results: + + +```JSON +{ + "fqdns": "mydnslabel67a7ba.eastus.cloudapp.azure.com", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup67a7ba/providers/Microsoft.Compute/virtualMachines/myVMName67a7ba", + "identity": { + "systemAssignedIdentity": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "userAssignedIdentities": { + "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup67a7ba/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myVMIDName67a7ba": { + "clientId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "principalId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + } + } + }, + "location": "eastus", + "macAddress": "60-45-BD-D3-B5-29", + "powerState": "VM running", + "privateIpAddress": "10.56.0.4", + "publicIpAddress": "20.231.118.239", + "resourceGroup": "myResourceGroup67a7ba", + "zones": "" +} +``` + +## Deploying AKV extension for VM $vm_name to retrieve cert $cert_name from AKV $akv_name..." + +```bash +MY_CERT_ID=$(az keyvault certificate show --vault-name $MY_KEY_VAULT --name $MY_CERT_NAME --query sid -o tsv) +MY_VM_CLIENTID=$(az identity show --resource-group $MY_RESOURCE_GROUP_NAME --name $MY_VM_ID_NAME --query clientId -o tsv) +MY_AKV_EXT_SETTINGS="{\"secretsManagementSettings\":{\"pollingIntervalInS\":\"3600\",\"requireInitialSync\":"true",\"certificateStoreLocation\":\"/etc/nginx/ssl/\",\"observedCertificates\":[\"$MY_CERT_ID\"]},\"authenticationSettings\":{\"msiClientId\":\"${MY_VM_CLIENTID}\"}}" + +az vm extension set \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --vm-name $MY_VM_NAME \ + -n "KeyVaultForLinux" \ + --publisher Microsoft.Azure.KeyVault \ + --version 2.0 \ + --enable-auto-upgrade true \ + --settings $MY_AKV_EXT_SETTINGS -o JSON +``` + +Results: + + +```JSON +{ + "autoUpgradeMinorVersion": true, + "enableAutomaticUpgrade": true, + "forceUpdateTag": null, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup67a7ba/providers/Microsoft.Compute/virtualMachines/myVMName67a7ba/extensions/KeyVaultForLinux", + "instanceView": null, + "location": "eastus", + "name": "KeyVaultForLinux", + "protectedSettings": null, + "protectedSettingsFromKeyVault": null, + "provisioningState": "Succeeded", + "publisher": "Microsoft.Azure.KeyVault", + "resourceGroup": "myResourceGroup67a7ba", + "settings": { + "secretsManagementSettings": { + "certificateStoreLocation": "/etc/nginx/ssl", + "observedCertificates": [ + "https://mykeyvault67a7ba.vault.azure.net/secrets/nginxcert67a7ba/aac9b30a90c04fc58bc230ae15b1148f" + ], + "pollingIntervalInS": "3600" + } + }, + "suppressFailures": null, + "tags": null, + "type": "Microsoft.Compute/virtualMachines/extensions", + "typeHandlerVersion": "2.0", + "typePropertiesType": "KeyVaultForLinux" +} +``` + +## Enable Azure AD 
login for a Linux Virtual Machine in Azure + +The following example deploys a VM and then installs the extension to enable Azure AD login for a Linux VM. VM extensions are small applications that provide post-deployment configuration and automation tasks on Azure virtual machines. + +```bash +az vm extension set \ + --publisher Microsoft.Azure.ActiveDirectory \ + --name AADSSHLoginForLinux \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --vm-name $MY_VM_NAME -o JSON +``` + +Results: + + +```JSON +{ + "autoUpgradeMinorVersion": true, + "enableAutomaticUpgrade": null, + "forceUpdateTag": null, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroupfa636b/providers/Microsoft.Compute/virtualMachines/myVMNamefa636b/extensions/AADSSHLoginForLinux", + "instanceView": null, + "location": "eastus", + "name": "AADSSHLoginForLinux", + "protectedSettings": null, + "protectedSettingsFromKeyVault": null, + "provisioningState": "Succeeded", + "publisher": "Microsoft.Azure.ActiveDirectory", + "resourceGroup": "myResourceGroupfa636b", + "settings": null, + "suppressFailures": null, + "tags": null, + "type": "Microsoft.Compute/virtualMachines/extensions", + "typeHandlerVersion": "1.0", + "typePropertiesType": "AADSSHLoginForLinux" +} +``` + +## Browse your secure website + +Validate that the application is running by visiting the application url: + +```bash +curl --max-time 120 -k "https://$FQDN" +``` + +Results: + + +```html + + + +Welcome to nginx! + + + +

+<body>
+<h1>Welcome to nginx!</h1>
+<p>If you see this page, the nginx web server is successfully installed and
+working. Further configuration is required.</p>
+
+<p>For online documentation and support please refer to
+<a href="http://nginx.org/">nginx.org</a>.<br/>
+Commercial support is available at
+<a href="http://nginx.com/">nginx.com</a>.</p>
+
+<p><em>Thank you for using nginx.</em></p>
+</body>
+</html>
+ + +``` diff --git a/scenarios/testing/CommentTest.md b/scenarios/testing/CommentTest.md new file mode 100644 index 00000000..05627f9b --- /dev/null +++ b/scenarios/testing/CommentTest.md @@ -0,0 +1,22 @@ + + + + + + +# Testing multi Line code block + +```azurecli-interactive +echo "Hello \ +world" +``` + +# This is what the output should be + +```text +hello world +``` \ No newline at end of file diff --git a/scenarios/testing/brokenMarkdown.md b/scenarios/testing/brokenMarkdown.md new file mode 100644 index 00000000..2754fdb4 --- /dev/null +++ b/scenarios/testing/brokenMarkdown.md @@ -0,0 +1,7 @@ +This is a markdown file which does not pass the requirements... It has a code block which never ends. + +Innovation Engine should be able to exit the program automatically instead of hanging + +```bash +echo "hello World" +`` diff --git a/scenarios/testing/createRG.md b/scenarios/testing/createRG.md new file mode 100644 index 00000000..f53b7693 --- /dev/null +++ b/scenarios/testing/createRG.md @@ -0,0 +1,29 @@ + + + +## Create a resource group + +Create a resource group with the [az group create](/cli/azure/group) command. An Azure resource group is a logical container into which Azure resources are deployed and managed. The following example creates a resource group named *myResourceGroup* in the *eastus* location: + +```bash +az group create --name $MY_RESOURCE_GROUP_NAME --location $MY_LOCATION +``` + + +```Output +{ + "fqdns": "", + "id": "/subscriptions//resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM", + "location": "eastus", + "macAddress": "00-0D-3A-23-9A-49", + "powerState": "VM running", + "privateIpAddress": "10.0.0.4", + "publicIpAddress": "40.68.254.142", + "resourceGroup": "myResourceGroup" +} +``` \ No newline at end of file diff --git a/scenarios/testing/e2eAzureTestCommentVariables.md b/scenarios/testing/e2eAzureTestCommentVariables.md new file mode 100644 index 00000000..2106218f --- /dev/null +++ b/scenarios/testing/e2eAzureTestCommentVariables.md @@ -0,0 +1,159 @@ +--- +title: 'Quickstart: Use the Azure CLI to create a Linux VM' +description: In this quickstart, you learn how to use the Azure CLI to create a Linux virtual machine +author: cynthn +ms.service: virtual-machines +ms.collection: linux +ms.topic: quickstart +ms.workload: infrastructure +ms.date: 06/01/2022 +ms.author: cynthn +ms.custom: mvc, seo-javascript-september2019, seo-javascript-october2019, seo-python-october2019, devx-track-azurecli, mode-api +--- + + + +# Quickstart: Create a Linux virtual machine with the Azure CLI + +**Applies to:** :heavy_check_mark: Linux VMs + +This quickstart shows you how to use the Azure CLI to deploy a Linux virtual machine (VM) in Azure. The Azure CLI is used to create and manage Azure resources via either the command line or scripts. + +In this tutorial, we will be installing the latest Debian image. To show the VM in action, you'll connect to it using SSH and install the NGINX web server. + +If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. + +## Launch Azure Cloud Shell + +The Azure Cloud Shell is a free interactive shell that you can use to run the steps in this article. It has common Azure tools preinstalled and configured to use with your account. + +To open the Cloud Shell, just select **Try it** from the upper right corner of a code block. 
You can also open Cloud Shell in a separate browser tab by going to [https://shell.azure.com/bash](https://shell.azure.com/bash). Select **Copy** to copy the blocks of code, paste it into the Cloud Shell, and select **Enter** to run it. + +If you prefer to install and use the CLI locally, this quickstart requires Azure CLI version 2.0.30 or later. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI]( /cli/azure/install-azure-cli). + + + +## Create a resource group + +Create a resource group with the [az group create](/cli/azure/group) command. An Azure resource group is a logical container into which Azure resources are deployed and managed. The following example creates a resource group named *myResourceGroup* in the *eastus* location: + +```bash +az group create --name $MY_RESOURCE_GROUP_NAME --location $MY_LOCATION +``` + +## Create virtual machine + +Create a VM with the [az vm create](/cli/azure/vm) command. + +The following example creates a VM named *myVM* and adds a user account named *azureuser*. The `--generate-ssh-keys` parameter is used to automatically generate an SSH key, and put it in the default key location (*~/.ssh*). To use a specific set of keys instead, use the `--ssh-key-values` option. + +```bash +az vm create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_VM_NAME \ + --image $MY_VM_IMAGE \ + --admin-username $MY_ADMIN_USERNAME \ + --generate-ssh-keys +``` + +It takes a few minutes to create the VM and supporting resources. The following example output shows the VM create operation was successful. + +```Output +{ + "fqdns": "", + "id": "/subscriptions//resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM", + "location": "eastus", + "macAddress": "00-0D-3A-23-9A-49", + "powerState": "VM running", + "privateIpAddress": "10.0.0.4", + "publicIpAddress": "40.68.254.142", + "resourceGroup": "myResourceGroup" +} +``` + +Make a note of the `publicIpAddress` to use later. + +## Install web server + +To see your VM in action, install the NGINX web server. Update your package sources and then install the latest NGINX package. + +```bash +az vm run-command invoke \ + -g $MY_RESOURCE_GROUP_NAME \ + -n $MY_VM_NAME \ + --command-id RunShellScript \ + --scripts "sudo apt-get update && sudo apt-get install -y nginx" +``` + +## Open port 80 for web traffic + +By default, only SSH connections are opened when you create a Linux VM in Azure. Use [az vm open-port](/cli/azure/vm) to open TCP port 80 for use with the NGINX web server: + +```bash +az vm open-port --port 80 --resource-group $MY_RESOURCE_GROUP_NAME --name $MY_VM_NAME +``` + +## View the web server in action + +Use a web browser of your choice to view the default NGINX welcome page. Use the public IP address of your VM as the web address. The following example shows the default NGINX web site: + +![Screenshot showing the N G I N X default web page.](./media/quick-create-cli/nginix-welcome-page-debian.png) + +Or Run the following command to see the NGINX welcome page in terminal + +```bash + curl $(az vm show -d -g $MY_RESOURCE_GROUP_NAME -n $MY_VM_NAME --query "publicIps" -o tsv) +``` + + +```HTML + + + +Welcome to nginx! + + + +

+<body>
+<h1>Welcome to nginx!</h1>
+<p>If you see this page, the nginx web server is successfully installed and
+working. Further configuration is required.</p>
+
+<p>For online documentation and support please refer to
+<a href="http://nginx.org/">nginx.org</a>.<br/>
+Commercial support is available at
+<a href="http://nginx.com/">nginx.com</a>.</p>
+
+<p><em>Thank you for using nginx.</em></p>
+</body>
+</html>
+ + +``` + +## Clean up resources + +When no longer needed, you can use the [az group delete](/cli/azure/group) command to remove the resource group, VM, and all related resources. + +```bash +az group delete --name $MY_RESOURCE_GROUP_NAME --no-wait --yes --verbose +``` + +## Next steps + +In this quickstart, you deployed a simple virtual machine, opened a network port for web traffic, and installed a basic web server. To learn more about Azure virtual machines, continue to the tutorial for Linux VMs. + + +> [!div class="nextstepaction"] +> [Azure Linux virtual machine tutorials](./tutorial-manage-vm.md) diff --git a/scenarios/testing/fuzzyMatchTest.md b/scenarios/testing/fuzzyMatchTest.md new file mode 100644 index 00000000..f58e37c3 --- /dev/null +++ b/scenarios/testing/fuzzyMatchTest.md @@ -0,0 +1,51 @@ +# Testing multi Line code block + +```azurecli-interactive +echo "Hello World" +``` +This is what the expected output should be + +```text +Hello world +``` + + +# Testing multi Line code block + +```azurecli-interactive +echo "Hello \ +world" +``` + +# Output Should Fail + +```text +world Hello +``` + +# Code block + +```azurecli-interactive +echo "Hello \ +world" +``` + +# Output Should Pass + +```text +Hello world +``` + +# Code block + +```azurecli-interactive +echo "Hello \ +world" +``` + +# Bad similarity - should fail + +```text +Hello world +``` + diff --git a/scenarios/testing/nonCLI.md b/scenarios/testing/nonCLI.md new file mode 100644 index 00000000..9d16b6fb --- /dev/null +++ b/scenarios/testing/nonCLI.md @@ -0,0 +1,114 @@ +--- +title: Quickstart - Create a Linux VM in the Azure portal +description: In this quickstart, you learn how to use the Azure portal to create a Linux virtual machine. +author: cynthn +ms.service: virtual-machines +ms.collection: linux +ms.topic: quickstart +ms.workload: infrastructure +ms.date: 08/01/2022 +ms.author: cynthn +ms.custom: mvc, mode-ui +--- +This document will not be a CLI document. I am curious what innovation engine will do in this case. + +# Quickstart: Create a Linux virtual machine in the Azure portal + +**Applies to:** :heavy_check_mark: Linux VMs + +Azure virtual machines (VMs) can be created through the Azure portal. The Azure portal is a browser-based user interface to create Azure resources. This quickstart shows you how to use the Azure portal to deploy a Linux virtual machine (VM) running Ubuntu 18.04 LTS. To see your VM in action, you also SSH to the VM and install the NGINX web server. + +If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. + +## Sign in to Azure + +Sign in to the [Azure portal](https://portal.azure.com). + +## Create virtual machine + +1. Enter *virtual machines* in the search. +1. Under **Services**, select **Virtual machines**. +1. In the **Virtual machines** page, select **Create** and then **Virtual machine**. The **Create a virtual machine** page opens. + +1. In the **Basics** tab, under **Project details**, make sure the correct subscription is selected and then choose to **Create new** resource group. Enter *myResourceGroup* for the name.*. + + ![Screenshot of the Project details section showing where you select the Azure subscription and the resource group for the virtual machine](./media/quick-create-portal/project-details.png) + +1. Under **Instance details**, enter *myVM* for the **Virtual machine name**, and choose *Ubuntu 18.04 LTS - Gen2* for your **Image**. Leave the other defaults. 
The default size and pricing is only shown as an example. Size availability and pricing are dependent on your region and subscription. + + :::image type="content" source="media/quick-create-portal/instance-details.png" alt-text="Screenshot of the Instance details section where you provide a name for the virtual machine and select its region, image, and size."::: + + > [!NOTE] + > Some users will now see the option to create VMs in multiple zones. To learn more about this new capability, see [Create virtual machines in an availability zone](../create-portal-availability-zone.md). + > :::image type="content" source="../media/create-portal-availability-zone/preview.png" alt-text="Screenshot showing that you have the option to create virtual machines in multiple availability zones."::: + + +1. Under **Administrator account**, select **SSH public key**. + +1. In **Username** enter *azureuser*. + +1. For **SSH public key source**, leave the default of **Generate new key pair**, and then enter *myKey* for the **Key pair name**. + + ![Screenshot of the Administrator account section where you select an authentication type and provide the administrator credentials](./media/quick-create-portal/administrator-account.png) + +1. Under **Inbound port rules** > **Public inbound ports**, choose **Allow selected ports** and then select **SSH (22)** and **HTTP (80)** from the drop-down. + + ![Screenshot of the inbound port rules section where you select what ports inbound connections are allowed on](./media/quick-create-portal/inbound-port-rules.png) + +1. Leave the remaining defaults and then select the **Review + create** button at the bottom of the page. + +1. On the **Create a virtual machine** page, you can see the details about the VM you are about to create. When you are ready, select **Create**. + +1. When the **Generate new key pair** window opens, select **Download private key and create resource**. Your key file will be download as **myKey.pem**. Make sure you know where the `.pem` file was downloaded; you will need the path to it in the next step. + +1. When the deployment is finished, select **Go to resource**. + +1. On the page for your new VM, select the public IP address and copy it to your clipboard. + + + ![Screenshot showing how to copy the IP address for the virtual machine](./media/quick-create-portal/ip-address.png) + + +## Connect to virtual machine + +Create an SSH connection with the VM. + +1. If you are on a Mac or Linux machine, open a Bash prompt and set read-only permission on the .pem file using `chmod 400 ~/Downloads/myKey.pem`. If you are on a Windows machine, open a PowerShell prompt. + +1. At your prompt, open an SSH connection to your virtual machine. Replace the IP address with the one from your VM, and replace the path to the `.pem` with the path to where the key file was downloaded. + +```console +ssh -i ~/Downloads/myKey.pem azureuser@10.111.12.123 +``` + +> [!TIP] +> The SSH key you created can be used the next time your create a VM in Azure. Just select the **Use a key stored in Azure** for **SSH public key source** the next time you create a VM. You already have the private key on your computer, so you won't need to download anything. + +## Install web server + +To see your VM in action, install the NGINX web server. From your SSH session, update your package sources and then install the latest NGINX package. + +```bash +sudo apt-get -y update +sudo apt-get -y install nginx +``` + +When done, type `exit` to leave the SSH session. 
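+If you want to verify the installation before leaving the SSH session, the following optional check is a minimal sketch; it assumes the default `nginx` systemd unit and that `curl` is available on the image:
+
+```bash
+# Optional sanity check from inside the VM (assumes systemd and curl are present).
+systemctl is-active nginx               # prints "active" when the service is running
+curl -sI http://localhost | head -n 1   # expect an "HTTP/1.1 200 OK" response from the default site
+```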
+ + +## View the web server in action + +Use a web browser of your choice to view the default NGINX welcome page. Type the public IP address of the VM as the web address. The public IP address can be found on the VM overview page or as part of the SSH connection string you used earlier. + +![Screenshot showing the NGINX default site in a browser](./media/quick-create-portal/nginx.png) + +## Clean up resources + +When no longer needed, you can delete the resource group, virtual machine, and all related resources. To do so, select the resource group for the virtual machine, select **Delete**, then confirm the name of the resource group to delete. + +## Next steps + +In this quickstart, you deployed a simple virtual machine, created a Network Security Group and rule, and installed a basic web server. To learn more about Azure virtual machines, continue to the tutorial for Linux VMs. + +> [!div class="nextstepaction"] +> [Azure Linux virtual machine tutorials](./tutorial-manage-vm.md) diff --git a/scenarios/testing/test.md b/scenarios/testing/test.md new file mode 100644 index 00000000..164f31b3 --- /dev/null +++ b/scenarios/testing/test.md @@ -0,0 +1,172 @@ +--- +title: 'Quickstart: Use the Azure CLI to create a Linux VM' +--- + +# Prerequisites + +Innovation Engine can process prerequisites for documents. This code section tests that the pre requisites functionality works in Innovation Engine. +It will run the following real prerequisites along with a look for and fail to run a fake prerequisite. + +You must have completed [Fuzzy Matching Test](testScripts/fuzzyMatchTest.md) and you must have completed [Comment Test](testScripts/CommentTest.md) + +You also need to have completed [This is a fake file](testScripts/fakefile.md) + +And there are going to be additional \ and ( to throw off the algorithm... + +# Running simple bash commands + +Innovation engine can execute bash commands. For example + + +```bash +echo "Hello World" +``` + +# Test Code block with expected output + +```azurecli-interactive +echo "Hello \ +world" +``` + +It also can test the output to make sure everything ran as planned. 
+ +``` +Hello world +``` + +# Test non-executable code blocks +If a code block does not have an executable tag it will simply render the codeblock as text + +For example: + +```YAML +apiVersion: apps/v1 +kind: Deployment +metadata: + name: azure-vote-back +spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-back + template: + metadata: + labels: + app: azure-vote-back + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: azure-vote-back + image: mcr.microsoft.com/oss/bitnami/redis:6.0.8 + env: + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 6379 + name: redis +--- +apiVersion: v1 +kind: Service +metadata: + name: azure-vote-back +spec: + ports: + - port: 6379 + selector: + app: azure-vote-back +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: azure-vote-front +spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-front + template: + metadata: + labels: + app: azure-vote-front + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: azure-vote-front + image: mcr.microsoft.com/azuredocs/azure-vote-front:v1 + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 80 + env: + - name: REDIS + value: "azure-vote-back" +--- +apiVersion: v1 +kind: Service +metadata: + name: azure-vote-front +spec: + type: LoadBalancer + ports: + - port: 80 + selector: + app: azure-vote-front + +``` + +# Testing regular comments + +Innovation engine is able to handle comments and actual do fancy things with special comments. + +There are comments you can't see here. + + + + + +# Testing Declaring Environment Variables from Comments +Innovation Engine can declare environment variables via hidden inline comments. This feature is useful for running documents E2E as part of CI/CD + + + + +```azurecli-interactive +echo $MY_VARIABLE +``` + + +# Test Running an Azure Command +```azurecli-interactive +az group exists --name MyResourceGroup +``` + +# Next Steps + +These are the next steps... at some point we need to do something here \ No newline at end of file diff --git a/scenarios/testing/variableHierarchy.ini b/scenarios/testing/variableHierarchy.ini new file mode 100644 index 00000000..84c5cad7 --- /dev/null +++ b/scenarios/testing/variableHierarchy.ini @@ -0,0 +1,2 @@ +MY_RESOURCE_GROUP = SetInINI +MY_VARIABLE_NAME = VariableFromIni \ No newline at end of file diff --git a/scenarios/testing/variableHierarchy.md b/scenarios/testing/variableHierarchy.md new file mode 100644 index 00000000..003d0e34 --- /dev/null +++ b/scenarios/testing/variableHierarchy.md @@ -0,0 +1,26 @@ +This document is to show the hierarchy of environment variables + + + + +```bash +echo $MY_RESOURCE_GROUP +echo $MY_VARIABLE_NAME +``` + +# The following will now declare variables locally which will overwrite comment variables + +```bash +export MY_RESOURCE_GROUP=RGSetLocally +export MY_VARIABLE_NAME=LocallySetVariable +``` + +```bash +echo $MY_RESOURCE_GROUP +echo $MY_VARIABLE_NAME +``` diff --git a/scripts/install_from_release.sh b/scripts/install_from_release.sh new file mode 100644 index 00000000..3b8ac8ad --- /dev/null +++ b/scripts/install_from_release.sh @@ -0,0 +1,22 @@ +set -e + +# Download the binary from the latest +echo "Installing IE & scenarios from the latest release..." 
+wget -q -O ie https://github.com/Azure/InnovationEngine/releases/download/latest/ie > /dev/null +wget -q -O scenarios.zip https://github.com/Azure/InnovationEngine/releases/download/latest/scenarios.zip > /dev/null + +# Setup permissions & move to the local bin +chmod +x ie > /dev/null +mkdir -p ~/.local/bin > /dev/null +mv ie ~/.local/bin > /dev/null + +# Unzip the scenarios, overwrite if they already exist. +unzip -o scenarios.zip -d ~ > /dev/null +rm scenarios.zip > /dev/null + +# Export the path to IE if it's not already available +if [[ !"$PATH" =~ "~/.local/bin" || !"$PATH" =~ "$HOME/.local/bin" ]]; then + export PATH="$PATH:~/.local/bin" +fi + +echo "Done." diff --git a/tutorial.md b/tutorial.md new file mode 100644 index 00000000..85cc604b --- /dev/null +++ b/tutorial.md @@ -0,0 +1,128 @@ +# Welcome to the innovation Engine Tutorial +## *TODO ADD MORE DETAIL TO IMPROVE TUTORIAL* + +# Running simple bash commands + +Innovation engine can execute bash commands. For example + +```bash +export VAR="Hello World" +``` + +```bash +echo $VAR +``` + +# Test Code block with expected output + +```azurecli-interactive +echo "Hello \ +world" +``` + +It also can test the output to make sure everything ran as planned. + +``` +Hello world +``` + +# Executable vs non-executable code blocks +Innovation engine supports code blocks which are both executable and non-executable. A code block is executable if the label/tag after the bash scripts is one of the supported executable tags. Those tags are: bash, terraform, azurecli-interactive, and azurecli. + +If a code block has a non supported tag like YAML or HTML it will simply render the code block as text and continue parsing the document. + +For example: + +```YAML +apiVersion: apps/v1 +kind: Deployment +metadata: + name: azure-vote-back +spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-back + template: + metadata: + labels: + app: azure-vote-back + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: azure-vote-back + image: mcr.microsoft.com/oss/bitnami/redis:6.0.8 + env: + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 6379 + name: redis +--- +apiVersion: v1 +kind: Service +metadata: + name: azure-vote-back +spec: + ports: + - port: 6379 + selector: + app: azure-vote-back +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: azure-vote-front +spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-front + template: + metadata: + labels: + app: azure-vote-front + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: azure-vote-front + image: mcr.microsoft.com/azuredocs/azure-vote-front:v1 + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 80 + env: + - name: REDIS + value: "azure-vote-back" +--- +apiVersion: v1 +kind: Service +metadata: + name: azure-vote-front +spec: + type: LoadBalancer + ports: + - port: 80 + selector: + app: azure-vote-front + +``` + + +# Next Steps + +These are the next steps... at some point we need to do something here \ No newline at end of file