From 4a21e0fede3828818a01c38cc0f4acb281971150 Mon Sep 17 00:00:00 2001 From: JoeAldinger Date: Wed, 4 Feb 2026 16:15:42 -0500 Subject: [PATCH] NOP-7 --- .../configuring-dns-forwarding-with-tls.adoc | 53 +++++++++++-------- .../k8s-nmstate-deploying-nmstate-CLI.adoc | 3 +- ...lling-the-kubernetes-nmstate-operator.adoc | 1 + modules/k8s-nmstate-uninstall-operator.adoc | 9 +++- modules/nw-bpfman-infw-about.adoc | 5 +- modules/nw-bpfman-infw-configure.adoc | 3 ++ modules/nw-controlling-dns-pod-placement.adoc | 51 +++++++++++------- modules/nw-dns-cache-tuning.adoc | 19 ++++--- modules/nw-dns-forward.adoc | 53 +++++++++++-------- modules/nw-dns-loglevel.adoc | 3 ++ modules/nw-dns-operator-logs.adoc | 5 +- modules/nw-dns-operator-managementState.adoc | 17 +++--- modules/nw-dns-operator-status.adoc | 4 +- modules/nw-dns-operator.adoc | 5 +- modules/nw-dns-operatorloglevel.adoc | 5 +- modules/nw-dns-view.adoc | 16 +++--- modules/nw-dns-viewlog.adoc | 6 ++- modules/nw-infw-operator-config-object.adoc | 12 ++++- modules/nw-infw-operator-cr.adoc | 5 +- modules/nw-infw-operator-deploying.adoc | 5 +- modules/nw-infw-operator-installing-cli.adoc | 8 +-- .../nw-infw-operator-installing-console.adoc | 6 +-- modules/nw-infw-operator-rules-object.adoc | 27 +++++----- modules/nw-infw-operator-troubleshooting.adoc | 5 ++ modules/nw-infw-operator-viewing.adoc | 3 ++ ...-stats-collected-kubernetes-nmtate-op.adoc | 3 +- .../networking_operators/dns-operator.adoc | 4 +- .../ingress-node-firewall-operator.adoc | 1 + 28 files changed, 205 insertions(+), 132 deletions(-) diff --git a/modules/configuring-dns-forwarding-with-tls.adoc b/modules/configuring-dns-forwarding-with-tls.adoc index f8a1270b2c82..c03efa9819f5 100644 --- a/modules/configuring-dns-forwarding-with-tls.adoc +++ b/modules/configuring-dns-forwarding-with-tls.adoc @@ -6,6 +6,9 @@ [id="configuring-dns-forwarding-with-tls_{context}"] = Configuring DNS forwarding with TLS +[role="_abstract"] +Configure DNS forwarding with TLS to 
secure queries to upstream resolvers.
+
 When working in a highly regulated environment, you might need the ability to secure DNS traffic when forwarding requests to upstream resolvers so that you can ensure additional DNS traffic and data privacy.
 
 Be aware that CoreDNS caches forwarded connections for 10 seconds. CoreDNS will hold a TCP connection open for those 10 seconds if no request is issued.
@@ -42,21 +45,21 @@ metadata:
   name: default
 spec:
   servers:
-  - name: example-server <1>
+  - name: example-server
     zones:
-    - example.com <2>
+    - example.com
     forwardPlugin:
       transportConfig:
-        transport: TLS <3>
+        transport: TLS
       tls:
         caBundle:
           name: mycacert
-        serverName: dnstls.example.com <4>
-      policy: Random <5>
-      upstreams: <6>
+        serverName: dnstls.example.com
+      policy: Random
+      upstreams:
       - 1.1.1.1
       - 2.2.2.2:5353
-  upstreamResolvers: <7>
+  upstreamResolvers:
     transportConfig:
       transport: TLS
     tls:
@@ -64,20 +67,22 @@ spec:
       name: mycacert
       serverName: dnstls.example.com
     upstreams:
-    - type: Network <8>
-      address: 1.2.3.4 <9>
-      port: 53 <10>
+    - type: Network
+      address: 1.2.3.4
+      port: 53
 ----
-<1> Must comply with the `rfc6335` service name syntax.
-<2> Must conform to the definition of a subdomain in the `rfc1123` service name syntax. The cluster domain, `cluster.local`, is an invalid subdomain for the `zones` field. The cluster domain, `cluster.local`, is an invalid `subdomain` for `zones`.
-<3> When configuring TLS for forwarded DNS queries, set the `transport` field to have the value `TLS`.
-<4> When configuring TLS for forwarded DNS queries, this is a mandatory server name used as part of the server name indication (SNI) to validate the upstream TLS server certificate.
-<5> Defines the policy to select upstream resolvers. Default value is `Random`. You can also use the values `RoundRobin`, and `Sequential`.
-<6> Required. Use it to provide upstream resolvers. A maximum of 15 `upstreams` entries are allowed per `forwardPlugin` entry.
-<7> Optional.
You can use it to override the default policy and forward DNS resolution to the specified DNS resolvers (upstream resolvers) for the default domain. If you do not provide any upstream resolvers, the DNS name queries go to the servers in `/etc/resolv.conf`. -<8> Only the `Network` type is allowed when using TLS and you must provide an IP address. `Network` type indicates that this upstream resolver should handle forwarded requests separately from the upstream resolvers listed in `/etc/resolv.conf`. -<9> The `address` field must be a valid IPv4 or IPv6 address. -<10> You can optionally provide a port. The `port` must have a value between `1` and `65535`. If you do not specify a port for the upstream, the default port is 853. ++ +where: + +`spec.servers.name`:: Must comply with the `rfc6335` service name syntax. +`spec.servers.zones`:: Must conform to the `rfc1123` subdomain syntax. The cluster domain, `cluster.local`, is invalid for `zones`. +`spec.servers.forwardPlugin.transportConfig.transport`:: Must be set to `TLS` when configuring TLS forwarding. +`spec.servers.forwardPlugin.transportConfig.tls.serverName`:: Must be set to the server name indication (SNI) server name used to validate the upstream TLS certificate. +`spec.servers.forwardPlugin.policy`:: Specifies the upstream selection policy. Defaults to `Random`; valid values are `RoundRobin` and `Sequential`. +`spec.servers.forwardPlugin.upstreams`:: Must provide upstream resolvers; maximum 15 entries per `forwardPlugin`. +`spec.upstreamResolvers.upstreams`:: Specifies an optional field to override the default policy for the default domain. Use the `Network` type only when TLS is enabled and provide an IP address. If omitted, queries use `/etc/resolv.conf`. +`spec.upstreamResolvers.upstreams.address`:: Must be a valid IPv4 or IPv6 address. +`spec.upstreamResolvers.upstreams.port`:: Specifies an optional field to provide a port number. Valid values are between `1` and `65535`; defaults to 853 when omitted. 
+
 
 [NOTE]
 ====
@@ -103,7 +108,7 @@ data:
         forward . 1.1.1.1 2.2.2.2:5353
     }
     bar.com:5353 example.com:5353 {
-        forward . 3.3.3.3 4.4.4.4:5454 <1>
+        forward . 3.3.3.3 4.4.4.4:5454
     }
     .:5353 {
         errors
@@ -127,9 +132,11 @@ metadata:
   name: dns-default
   namespace: openshift-dns
 ----
-<1> Changes to the `forwardPlugin` triggers a rolling update of the CoreDNS daemon set.
++
+
+** The `data.Corefile` key contains the Corefile configuration for the DNS server. Changes to the `forwardPlugin` trigger a rolling update of the CoreDNS daemon set.
 
 [role="_additional-resources"]
 .Additional resources
 
-* For more information on DNS forwarding, see the link:https://coredns.io/plugins/forward/[CoreDNS forward documentation].
+* link:https://coredns.io/plugins/forward/[CoreDNS forward documentation]
 
diff --git a/modules/k8s-nmstate-deploying-nmstate-CLI.adoc b/modules/k8s-nmstate-deploying-nmstate-CLI.adoc
index 9735902b2865..0ab90c8daf30 100644
--- a/modules/k8s-nmstate-deploying-nmstate-CLI.adoc
+++ b/modules/k8s-nmstate-deploying-nmstate-CLI.adoc
@@ -6,7 +6,8 @@
 [id="installing-the-kubernetes-nmstate-operator-CLI_{context}"]
 = Installing the Kubernetes NMState Operator by using the CLI
 
-You can install the Kubernetes NMState Operator by using the OpenShift CLI (`oc)`. After it is installed, the Operator can deploy the NMState State Controller as a daemon set across all of the cluster nodes.
+[role="_abstract"]
+You can install the Kubernetes NMState Operator by using the OpenShift CLI (`oc`). After it is installed, the Operator deploys the NMState State Controller as a daemon set across all of the cluster nodes to manage the node network state and configuration.
.Prerequisites diff --git a/modules/k8s-nmstate-installing-the-kubernetes-nmstate-operator.adoc b/modules/k8s-nmstate-installing-the-kubernetes-nmstate-operator.adoc index ac419d5c1ca7..8d203deff7e9 100644 --- a/modules/k8s-nmstate-installing-the-kubernetes-nmstate-operator.adoc +++ b/modules/k8s-nmstate-installing-the-kubernetes-nmstate-operator.adoc @@ -7,6 +7,7 @@ [id="installing-the-kubernetes-nmstate-operator-web-console_{context}"] = Installing the Kubernetes NMState Operator by using the web console +[role="_abstract"] You can install the Kubernetes NMState Operator by using the web console. After you install the Kubernetes NMState Operator, the Operator has deployed the NMState State Controller as a daemon set across all of the cluster nodes. .Prerequisites diff --git a/modules/k8s-nmstate-uninstall-operator.adoc b/modules/k8s-nmstate-uninstall-operator.adoc index 25bf4fff0691..80ad38c74adb 100644 --- a/modules/k8s-nmstate-uninstall-operator.adoc +++ b/modules/k8s-nmstate-uninstall-operator.adoc @@ -6,6 +6,9 @@ [id="k8s-nmstate-uninstall-operator_{context}"] = Uninstalling the Kubernetes NMState Operator +[role="_abstract"] +Remove the Kubernetes NMState Operator and related resources when they are no longer needed. + You can use the {olm-first} to uninstall the Kubernetes NMState Operator, but by design {olm} does not delete any associated custom resource definitions (CRDs), custom resources (CRs), or API Services. Before you uninstall the Kubernetes NMState Operator from the `Subcription` resource used by {olm}, identify what Kubernetes NMState Operator resources to delete. This identification ensures that you can delete resources without impacting your running cluster. 
@@ -73,9 +76,11 @@ INDEX=$(oc get console.operator.openshift.io cluster -o json | jq -r '.spec.plug + [source,terminal] ---- -$ oc patch console.operator.openshift.io cluster --type=json -p "[{\"op\": \"remove\", \"path\": \"/spec/plugins/$INDEX\"}]" <1> +$ oc patch console.operator.openshift.io cluster --type=json -p "[{\"op\": \"remove\", \"path\": \"/spec/plugins/$INDEX\"}]" ---- -<1> `INDEX` is an auxiliary variable. You can specify a different name for this variable. ++ + +** `INDEX` is an auxiliary variable. You can specify a different name for this variable. . Delete all the custom resource definitions (CRDs), such as `nmstates.nmstate.io`, by running the following commands: + diff --git a/modules/nw-bpfman-infw-about.adoc b/modules/nw-bpfman-infw-about.adoc index 3223a14f51ab..0277a246b50c 100644 --- a/modules/nw-bpfman-infw-about.adoc +++ b/modules/nw-bpfman-infw-about.adoc @@ -2,10 +2,13 @@ // // * networking/network_security/ingress-node-firewall-operator.adoc -:_mod-docs-content-type: PROCEDURE +:_mod-docs-content-type: CONCEPT [id="ingress-node-firewall-operator_{context}"] = Ingress Node Firewall Operator integration +[role="_abstract"] +Learn when to use eBPF Manager to load and manage Ingress Node Firewall programs. + The Ingress Node Firewall uses link:https://www.kernel.org/doc/html/latest/bpf/index.html[eBPF] programs to implement some of its key firewall functionality. By default these eBPF programs are loaded into the kernel using a mechanism specific to the Ingress Node Firewall. You can configure the Ingress Node Firewall Operator to use the eBPF Manager Operator for loading and managing these programs instead. 
When this integration is enabled, the following limitations apply: diff --git a/modules/nw-bpfman-infw-configure.adoc b/modules/nw-bpfman-infw-configure.adoc index 2a055a0cacad..d864dd86f6e8 100644 --- a/modules/nw-bpfman-infw-configure.adoc +++ b/modules/nw-bpfman-infw-configure.adoc @@ -6,6 +6,9 @@ [id="bpfman-infw-configure_{context}"] = Configuring Ingress Node Firewall Operator to use the eBPF Manager Operator +[role="_abstract"] +Configure the Ingress Node Firewall to use eBPF Manager for program lifecycle control. + The Ingress Node Firewall uses link:https://www.kernel.org/doc/html/latest/bpf/index.html[eBPF] programs to implement some of its key firewall functionality. By default these eBPF programs are loaded into the kernel using a mechanism specific to the Ingress Node Firewall. As a cluster administrator, you can configure the Ingress Node Firewall Operator to use the eBPF Manager Operator for loading and managing these programs instead, adding additional security and observability functionality. diff --git a/modules/nw-controlling-dns-pod-placement.adoc b/modules/nw-controlling-dns-pod-placement.adoc index c03d76ac92b5..37cef8a46679 100644 --- a/modules/nw-controlling-dns-pod-placement.adoc +++ b/modules/nw-controlling-dns-pod-placement.adoc @@ -6,6 +6,9 @@ [id="nw-controlling-dns-pod-placement_{context}"] = Controlling DNS pod placement +[role="_abstract"] +Control where CoreDNS and node-resolver pods run by using taints, tolerations, and selectors. + The DNS Operator has two daemon sets: one for CoreDNS called `dns-default` and one for managing the `/etc/hosts` file called `node-resolver`. You can assign and run CoreDNS pods on specified nodes. For example, if the cluster administrator has configured security policies that prohibit communication between pairs of nodes, you can configure CoreDNS pods to run on a restricted set of nodes. @@ -13,7 +16,7 @@ You can assign and run CoreDNS pods on specified nodes. 
For example, if the clus DNS service is available to all pods if the following circumstances are true: * DNS pods are running on some nodes in the cluster. -* The nodes on which DNS pods are not running have network connectivity to nodes on which DNS pods are running, +* The nodes on which DNS pods are not running have network connectivity to nodes on which DNS pods are running, The `node-resolver` daemon set must run on every node host because it adds an entry for the cluster image registry to support pulling images. The `node-resolver` pods have only one job: to look up the `image-registry.openshift-image-registry.svc` service's cluster IP address and add it to `/etc/hosts` on the node host so that the container runtime can resolve the service name. @@ -33,12 +36,12 @@ As a cluster administrator, you can use a custom node selector to configure the + [source,terminal] ---- -$ oc adm taint nodes dns-only=abc:NoExecute <1> +$ oc adm taint nodes dns-only=abc:NoExecute ---- + -<1> Replace `` with the actual name of the node. +** Replace `` with the actual name of the node. -. Modify the DNS Operator object named `default` to include the corresponding toleration by entering the following command: +. Modify the DNS Operator object named `default` to include the corresponding toleration by entering the following command: + [source,terminal] ---- @@ -49,17 +52,23 @@ $ oc edit dns.operator/default + [source,yaml] ---- - spec: - nodePlacement: - tolerations: - - effect: NoExecute - key: "dns-only" <1> - operator: Equal - value: abc - tolerationSeconds: 3600 <2> +apiVersion: operator.openshift.io/v1 +kind: DNS +metadata: + name: default +spec: + nodePlacement: + tolerations: + - effect: NoExecute + key: "dns-only" + operator: Equal + value: abc + tolerationSeconds: 3600 ---- -<1> If the `key` field is set to `dns-only`, it can be tolerated indefinitely. -<2> The `tolerationSeconds` field is optional. 
++
+
+** If the `key` field is set to `dns-only`, it can be tolerated indefinitely.
+** The `tolerationSeconds` field is optional.
 
 . Optional: To specify node placement using a node selector, modify the default DNS Operator:
 
@@ -67,10 +76,14 @@ $ oc edit dns.operator/default
 +
 [source,yaml]
 ----
- spec:
-   nodePlacement:
-     nodeSelector: <1>
-       node-role.kubernetes.io/control-plane: ""
+apiVersion: operator.openshift.io/v1
+kind: DNS
+metadata:
+  name: default
+spec:
+  nodePlacement:
+    nodeSelector:
+      node-role.kubernetes.io/control-plane: ""
 ----
 +
-<1> This node selector ensures that the CoreDNS pods run only on control plane nodes.
\ No newline at end of file
+** The `nodeSelector` field in the example ensures that the CoreDNS pods run only on control plane nodes.
\ No newline at end of file
diff --git a/modules/nw-dns-cache-tuning.adoc b/modules/nw-dns-cache-tuning.adoc
index f7920447c2ef..9499e4862c6b 100644
--- a/modules/nw-dns-cache-tuning.adoc
+++ b/modules/nw-dns-cache-tuning.adoc
@@ -6,11 +6,7 @@
 = Tuning the CoreDNS cache
 
 [role="_abstract"]
-To reduce the load on upstream DNS resolvers, you can tune the CoreDNS cache by adjusting the duration of positive and negative caching. This process involves modifying the time-to-live (TTL) values within the DNS Operator object to control how long query responses are stored.
-
-For CoreDNS, you can configure the maximum duration of both successful or unsuccessful caching, also known respectively as positive or negative caching.
-
-You can shorten the TTL of the DNS record by setting a lower positive cache. You cannot increase the TTL on the DNS record by setting a higher positive cache. The maximum cache is the lower of the TTL of the DNS record or the positive cache.
+For CoreDNS, you can configure the maximum duration of both successful or unsuccessful caching, also known respectively as positive or negative caching.
Tuning the cache duration of DNS query responses can reduce the load for any upstream DNS resolvers. [WARNING] ==== @@ -37,12 +33,15 @@ metadata: name: default spec: cache: - positiveTTL: 1h <1> - negativeTTL: 0.5h10m <2> + positiveTTL: 1h + negativeTTL: 0.5h10m ---- + -<1> The string value `1h` is converted to its respective number of seconds by CoreDNS. If this field is omitted, the value is assumed to be `0s` and the cluster uses the internal default value of `900s` as a fallback. -<2> The string value can be a combination of units such as `0.5h10m` and is converted to its respective number of seconds by CoreDNS. If this field is omitted, the value is assumed to be `0s` and the cluster uses the internal default value of `30s` as a fallback. + +where: + +`spec.cache.positiveTTL`:: Specifies a string value that is converted to its respective number of seconds by CoreDNS. If this field is omitted, the value is assumed to be `0s` and the cluster uses the internal default value of `900s` as a fallback. +`spec.cache.negativeTTL`:: Specifies a string value that is converted to its respective number of seconds by CoreDNS. If this field is omitted, the value is assumed to be `0s` and the cluster uses the internal default value of `30s` as a fallback. .Verification @@ -65,4 +64,4 @@ $ oc get configmap/dns-default -n openshift-dns -o yaml [role="_additional-resources"] .Additional resources -For more information on caching, see link:https://coredns.io/plugins/cache/[CoreDNS cache]. +* link:https://coredns.io/plugins/cache/[CoreDNS cache] diff --git a/modules/nw-dns-forward.adoc b/modules/nw-dns-forward.adoc index 36b0ae6af165..5f34a965dbb0 100644 --- a/modules/nw-dns-forward.adoc +++ b/modules/nw-dns-forward.adoc @@ -6,6 +6,9 @@ [id="nw-dns-forward_{context}"] = Using DNS forwarding +[role="_abstract"] +Configure DNS forwarding servers and upstream resolvers for the cluster. 
+ You can use DNS forwarding to override the default forwarding configuration in the `/etc/resolv.conf` file in the following ways: * Specify name servers (`spec.servers`) for every zone. If the forwarded zone is the ingress domain managed by {product-title}, then the upstream name server must be authorized for the domain. @@ -62,43 +65,47 @@ spec: nodePlacement: {} operatorLogLevel: Normal servers: - - name: example-server <1> + - name: example-server zones: - - example.com <2> + - example.com forwardPlugin: - policy: Random <3> - upstreams: <4> + policy: Random + upstreams: - 1.1.1.1 - 2.2.2.2:5353 - upstreamResolvers: <5> - policy: Random <6> - protocolStrategy: "" <7> - transportConfig: {} <8> + upstreamResolvers: + policy: Random + protocolStrategy: "" + transportConfig: {} upstreams: - - type: SystemResolvConf <9> + - type: SystemResolvConf - type: Network - address: 1.2.3.4 <10> - port: 53 <11> + address: 1.2.3.4 + port: 53 status: clusterDomain: cluster.local clusterIP: x.y.z.10 conditions: ... ---- -<1> Must comply with the `rfc6335` service name syntax. -<2> Must conform to the definition of a subdomain in the `rfc1123` service name syntax. The cluster domain, `cluster.local`, is an invalid subdomain for the `zones` field. -<3> Defines the policy to select upstream resolvers listed in the `forwardPlugin`. Default value is `Random`. You can also use the values `RoundRobin`, and `Sequential`. -<4> A maximum of 15 `upstreams` is allowed per `forwardPlugin`. -<5> You can use `upstreamResolvers` to override the default forwarding policy and forward DNS resolution to the specified DNS resolvers (upstream resolvers) for the default domain. If you do not provide any upstream resolvers, the DNS name queries go to the servers declared in `/etc/resolv.conf`. -<6> Determines the order in which upstream servers listed in `upstreams` are selected for querying. You can specify one of these values: `Random`, `RoundRobin`, or `Sequential`. 
The default value is `Sequential`. -<7> When omitted, the platform chooses a default, normally the protocol of the original client request. Set to `TCP` to specify that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. -<8> Used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver. -<9> You can specify two types of `upstreams`: `SystemResolvConf` or `Network`. `SystemResolvConf` configures the upstream to use `/etc/resolv.conf` and `Network` defines a `Networkresolver`. You can specify one or both. -<10> If the specified type is `Network`, you must provide an IP address. The `address` field must be a valid IPv4 or IPv6 address. -<11> If the specified type is `Network`, you can optionally provide a port. The `port` field must have a value between `1` and `65535`. If you do not specify a port for the upstream, the default port is 853. ++ +where: + +`spec.servers.name`:: Must comply with the `rfc6335` service name syntax. +`spec.servers.zones`:: Must conform to the `rfc1123` subdomain syntax. The cluster domain `cluster.local` is invalid for `zones`. +`spec.servers.forwardPlugin.policy`:: Specifies the upstream selection policy. Defaults to `Random`; allowed values are `RoundRobin` and `Sequential`. +`spec.servers.forwardPlugin.upstreams`:: Must provide no more than 15 `upstreams` entries per `forwardPlugin`. +`spec.upstreamResolvers.upstreams`:: Specifies an `upstreamResolvers` to override the default forwarding policy and forward DNS resolution to the specified DNS resolvers (upstream resolvers) for the default domain. You can use this field when you need custom upstream resolvers; otherwise queries use the servers declared in `/etc/resolv.conf`. +`spec.upstreamResolvers.policy`:: Specifies the upstream selection order. Defaults to `Sequential`; allowed values are `Random`, `RoundRobin`, and `Sequential`. 
+`spec.upstreamResolvers.protocolStrategy`:: Specify `TCP` to force the protocol to use for upstream DNS requests, even if the request uses UDP. Valid values are `TCP` and omitted. When omitted, the platform chooses a default, normally the protocol of the original client request. +`spec.upstreamResolvers.transportConfig`:: Specifies the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver. +`spec.upstreamResolvers.upstreams.type`:: Specifies two types of `upstreams`: `SystemResolvConf` or `Network`. `SystemResolvConf` configures the upstream to use `/etc/resolv.conf` and `Network` defines a `Networkresolver`. You can specify one or both. +`spec.upstreamResolvers.upstreams.address`:: Specifies a valid IPv4 or IPv6 address when type is `Network`. +`spec.upstreamResolvers.upstreams.port`:: Specifies an optional field to provide a port number. Valid values are between `1` and `65535`; defaults to 853 when omitted. + //TODO OSDOCS-11830 This YAML looks like it has a syntax problem after upstreamResolvers [role="_additional-resources"] .Additional resources -* For more information on DNS forwarding, see the link:https://coredns.io/plugins/forward/[CoreDNS forward documentation]. +* link:https://coredns.io/plugins/forward/[CoreDNS forward documentation] diff --git a/modules/nw-dns-loglevel.adoc b/modules/nw-dns-loglevel.adoc index 8c2d1b3cbde7..ed4623df1df0 100644 --- a/modules/nw-dns-loglevel.adoc +++ b/modules/nw-dns-loglevel.adoc @@ -5,6 +5,9 @@ [id="nw-dns-loglevel_{context}"] = Setting the CoreDNS log level +[role="_abstract"] +Set CoreDNS log levels to control the detail of DNS error logging. + Log levels for CoreDNS and the CoreDNS Operator are set by using different methods. You can configure the CoreDNS log level to determine the amount of detail in logged error messages. The valid values for CoreDNS log level are `Normal`, `Debug`, and `Trace`. The default `logLevel` is `Normal`. 
 [NOTE]
 ====
diff --git a/modules/nw-dns-operator-logs.adoc b/modules/nw-dns-operator-logs.adoc
index e39d0ef406be..8cff93a75d1f 100644
--- a/modules/nw-dns-operator-logs.adoc
+++ b/modules/nw-dns-operator-logs.adoc
@@ -6,11 +6,12 @@
 [id="nw-dns-operator-logs_{context}"]
 = Viewing DNS Operator logs
 
-You can view DNS Operator logs by using the `oc logs` command.
+[role="_abstract"]
+You can view DNS Operator logs to troubleshoot DNS issues, verify configuration changes, and monitor activity by using the `oc logs` command.
 
 .Procedure
 
-* View the logs of the DNS Operator:
+* View the logs of the DNS Operator by running the following command:
 +
 [source,terminal]
 ----
diff --git a/modules/nw-dns-operator-managementState.adoc b/modules/nw-dns-operator-managementState.adoc
index 576356dff50d..59851c15fb56 100644
--- a/modules/nw-dns-operator-managementState.adoc
+++ b/modules/nw-dns-operator-managementState.adoc
@@ -6,6 +6,9 @@
 [id="nw-dns-operator-managementState_{context}"]
 = Changing the DNS Operator managementState
 
+[role="_abstract"]
+You can change from the default `Managed` state to `Unmanaged` to stop the DNS Operator from managing its resources in order to apply a workaround or test a configuration change.
+
 The DNS Operator manages the CoreDNS component to provide a name resolution service for pods and services in the cluster.
 
 The `managementState` of the DNS Operator is set to `Managed` by default, which means that the DNS Operator is actively managing its resources. You can change it to `Unmanaged`, which means the DNS Operator is not managing its resources.
 
 The following are use cases for changing the DNS Operator `managementState`:
 
@@ -14,9 +17,14 @@ The following are use cases for changing the DNS Operator `managementState`:
 * You are a cluster administrator and have reported an issue with CoreDNS, but need to apply a workaround until the issue is fixed.
You can set the `managementState` field of the DNS Operator to `Unmanaged` to apply the workaround. +[NOTE] +==== +You cannot upgrade while the `managementState` is set to `Unmanaged`. +==== + .Procedure -. Change `managementState` to `Unmanaged` in the DNS Operator: +. Change `managementState` to `Unmanaged` in the DNS Operator by running the following command: + [source,terminal] ---- @@ -27,10 +35,5 @@ oc patch dns.operator.openshift.io default --type merge --patch '{"spec":{"manag + [source,terminal] ---- -$ oc get dns.operator.openshift.io default -ojsonpath='{.spec.managementState}' +$ oc get dns.operator.openshift.io default -ojsonpath='{.spec.managementState}' ---- -+ -[NOTE] -==== -You cannot upgrade while the `managementState` is set to `Unmanaged`. -==== diff --git a/modules/nw-dns-operator-status.adoc b/modules/nw-dns-operator-status.adoc index 634c111c6033..6f06bae6ce63 100644 --- a/modules/nw-dns-operator-status.adoc +++ b/modules/nw-dns-operator-status.adoc @@ -6,8 +6,8 @@ [id="nw-dns-operator-status_{context}"] = Checking DNS Operator status -You can inspect the status and view the details of the DNS Operator -using the `oc describe` command. +[role="_abstract"] +You can inspect the status and view the details of the DNS Operator by using the `oc describe` command. .Procedure diff --git a/modules/nw-dns-operator.adoc b/modules/nw-dns-operator.adoc index 5c265a3652e6..0ed0fc35930e 100644 --- a/modules/nw-dns-operator.adoc +++ b/modules/nw-dns-operator.adoc @@ -5,6 +5,9 @@ [id="nw-dns-operator_{context}"] = Checking the status of the DNS Operator +[role="_abstract"] +You can check the DNS Operator deployment and cluster operator status. The DNS Operator is deployed during installation with a `Deployment` object. + The DNS Operator implements the `dns` API from the `operator.openshift.io` API group. 
The Operator deploys CoreDNS using a daemon set, creates a service for the daemon set, and configures the kubelet to instruct pods to use the CoreDNS @@ -12,8 +15,6 @@ service IP address for name resolution. .Procedure -The DNS Operator is deployed during installation with a `Deployment` object. - . Use the `oc get` command to view the deployment status: + [source,terminal] diff --git a/modules/nw-dns-operatorloglevel.adoc b/modules/nw-dns-operatorloglevel.adoc index cfaed7aa4cc8..8e05d3d1e913 100644 --- a/modules/nw-dns-operatorloglevel.adoc +++ b/modules/nw-dns-operatorloglevel.adoc @@ -5,7 +5,10 @@ [id="nw-dns-operatorloglevel_{context}"] = Setting the CoreDNS Operator log level -Log levels for CoreDNS and CoreDNS Operator are set by using different methods. Cluster administrators can configure the Operator log level to more quickly track down OpenShift DNS issues. The valid values for `operatorLogLevel` are `Normal`, `Debug`, and `Trace`. `Trace` has the most detailed information. The default `operatorlogLevel` is `Normal`. There are seven logging levels for Operator issues: Trace, Debug, Info, Warning, Error, Fatal, and Panic. After the logging level is set, log entries with that severity or anything above it will be logged. +[role="_abstract"] +You can configure the Operator log level to quickly track down OpenShift DNS issues. + +The valid values for `operatorLogLevel` are `Normal`, `Debug`, and `Trace`. `Trace` has the most detailed information. The default `operatorlogLevel` is `Normal`. There are seven logging levels for Operator issues: Trace, Debug, Info, Warning, Error, Unrecoverable, and Panic. After the logging level is set, log entries with that severity or anything above it will be logged. * `operatorLogLevel: "Normal"` sets `logrus.SetLogLevel("Info")`. 
diff --git a/modules/nw-dns-view.adoc b/modules/nw-dns-view.adoc
index 436cfa805747..0cb3a7a4d8e5 100644
--- a/modules/nw-dns-view.adoc
+++ b/modules/nw-dns-view.adoc
@@ -6,6 +6,9 @@
 [id="nw-dns-view_{context}"]
 = View the default DNS
 
+[role="_abstract"]
+View the default DNS resource and cluster DNS settings to verify the DNS configuration or troubleshoot DNS issues.
+
 Every new {product-title} installation has a `dns.operator` named `default`.
 
 .Procedure
@@ -28,14 +31,15 @@ API Version: operator.openshift.io/v1
 Kind: DNS
 ...
 Status:
-  Cluster Domain: cluster.local <1>
-  Cluster IP: 172.30.0.10 <2>
+  Cluster Domain: cluster.local
+  Cluster IP: 172.30.0.10
 ...
 ----
-<1> The Cluster Domain field is the base DNS domain used to construct fully
-qualified pod and service domain names.
-<2> The Cluster IP is the address pods query for name resolution. The IP is
-defined as the 10th address in the service CIDR range.
++
+where:
+
+`Status.Cluster Domain`:: Specifies the base DNS domain used to construct fully qualified pod and service domain names.
+`Status.Cluster IP`:: Specifies the address that pods query for name resolution. The IP is defined as the 10th address in the service CIDR range.
 
 . To find the service CIDR range, such as `172.30.0.0/16`, of your cluster, use the `oc get` command:
 +
diff --git a/modules/nw-dns-viewlog.adoc b/modules/nw-dns-viewlog.adoc
index d9e06d00aa83..ea11303869f4 100644
--- a/modules/nw-dns-viewlog.adoc
+++ b/modules/nw-dns-viewlog.adoc
@@ -5,7 +5,8 @@
 [id="nw-dns-viewlog_{context}"]
 = Viewing the CoreDNS logs
 
-You can view CoreDNS logs by using the `oc logs` command.
+[role="_abstract"]
+You can view CoreDNS pod logs to troubleshoot DNS issues by using the `oc logs` command.
 
 .Procedure
 
@@ -22,4 +23,5 @@ $ oc -n openshift-dns logs -c dns
 ----
 $ oc -n openshift-dns logs -c dns -l dns.operator.openshift.io/daemonset-dns=default -f --max-log-requests= <1>
 ----
-<1> Specifies the number of DNS pods to stream logs from. The maximum is 6.
++ +** ``: Specifies the number of DNS pods to stream logs from. The maximum is 6. diff --git a/modules/nw-infw-operator-config-object.adoc b/modules/nw-infw-operator-config-object.adoc index 127e2c92af89..591c67e26113 100644 --- a/modules/nw-infw-operator-config-object.adoc +++ b/modules/nw-infw-operator-config-object.adoc @@ -4,7 +4,10 @@ :_mod-docs-content-type: CONCEPT [id="nw-infw-operator-config-object_{context}"] -== Ingress Node Firewall configuration object += Ingress Node Firewall configuration object + +[role="_abstract"] +Review configuration fields so you can define how the Operator deploys the firewall. The fields for the Ingress Node Firewall configuration object are described in the following table: @@ -28,6 +31,11 @@ A node selection constraint used to target nodes through specified node labels. [source,yaml] ---- +apiVersion: ingressnodefirewall.openshift.io/v1alpha1 +kind: IngressNodeFirewallConfig +metadata: + name: ingressnodefirewallconfig + namespace: openshift-ingress-node-firewall spec: nodeSelector: node-role.kubernetes.io/worker: "" @@ -55,7 +63,7 @@ endif::openshift-rosa,openshift-rosa-hcp[] ifdef::openshift-rosa,openshift-rosa-hcp[] To start, the Operator consumes an `IngressNodeFirewallConfig` in order to generate the daemonset on all nodes. After this is created, additional firewall rule objects can be created. -endif::openshift-rosa,openshift-rosa-hcp[] +endif::[] ==== [id="nw-ingress-node-firewall-example-cr-2_{context}"] diff --git a/modules/nw-infw-operator-cr.adoc b/modules/nw-infw-operator-cr.adoc index bda1124b1c47..60073d18f001 100644 --- a/modules/nw-infw-operator-cr.adoc +++ b/modules/nw-infw-operator-cr.adoc @@ -6,7 +6,10 @@ [id="nw-infw-operator-cr_{context}"] = Ingress Node Firewall Operator -The Ingress Node Firewall Operator provides ingress firewall rules at a node level by deploying the daemon set to nodes you specify and manage in the firewall configurations. 
To deploy the daemon set, you create an `IngressNodeFirewallConfig` custom resource (CR). The Operator applies the `IngressNodeFirewallConfig` CR to create ingress node firewall daemon set `daemon`, which run on all nodes that match the `nodeSelector`. +[role="_abstract"]
+The Ingress Node Firewall Operator provides ingress firewall rules at a node level that you can specify and manage in the firewall configurations.
+
+To deploy the daemon set created by the Operator, you create an `IngressNodeFirewallConfig` custom resource (CR). The Operator applies the `IngressNodeFirewallConfig` CR to create ingress node firewall daemon set `daemon`, which runs on all nodes that match the `nodeSelector`. You configure `rules` of the `IngressNodeFirewall` CR and apply them to clusters using the `nodeSelector` and setting values to "true". diff --git a/modules/nw-infw-operator-deploying.adoc b/modules/nw-infw-operator-deploying.adoc index 902f3c3b76bf..f0f0e0048021 100644 --- a/modules/nw-infw-operator-deploying.adoc +++ b/modules/nw-infw-operator-deploying.adoc @@ -6,13 +6,14 @@ [id="nw-infw-operator-deploying_{context}"] = Deploying Ingress Node Firewall Operator +[role="_abstract"] +To deploy the Ingress Node Firewall Operator, create an `IngressNodeFirewallConfig` custom resource that will deploy the Operator's daemon set. You can deploy one or multiple `IngressNodeFirewall` CRDs to nodes by applying firewall rules. + .Prerequisite * The Ingress Node Firewall Operator is installed. .Procedure -To deploy the Ingress Node Firewall Operator, create a `IngressNodeFirewallConfig` custom resource that will deploy the Operator's daemon set. You can deploy one or multiple `IngressNodeFirewall` CRDs to nodes by applying firewall rules. - . Create the `IngressNodeFirewallConfig` inside the `openshift-ingress-node-firewall` namespace named `ingressnodefirewallconfig`. . 
Run the following command to deploy Ingress Node Firewall Operator rules: diff --git a/modules/nw-infw-operator-installing-cli.adoc b/modules/nw-infw-operator-installing-cli.adoc index 3b08dc18ed34..93906e1212fd 100644 --- a/modules/nw-infw-operator-installing-cli.adoc +++ b/modules/nw-infw-operator-installing-cli.adoc @@ -6,12 +6,8 @@ [id="installing-infw-operator_{context}"] = Installing the Ingress Node Firewall Operator -As a cluster administrator, you can install the Ingress Node Firewall Operator by using the {product-title} CLI or the web console. - -[id="install-operator-cli_{context}"] -== Installing the Ingress Node Firewall Operator using the CLI - -As a cluster administrator, you can install the Operator using the CLI. +[role="_abstract"] +As a cluster administrator, you can install the Ingress Node Firewall Operator to enable node-level ingress firewalling by using the {product-title} CLI. .Prerequisites diff --git a/modules/nw-infw-operator-installing-console.adoc b/modules/nw-infw-operator-installing-console.adoc index aa21c60e9caa..b6c931da8b85 100644 --- a/modules/nw-infw-operator-installing-console.adoc +++ b/modules/nw-infw-operator-installing-console.adoc @@ -4,9 +4,10 @@ :_mod-docs-content-type: PROCEDURE [id="install-operator-web-console_{context}"] -== Installing the Ingress Node Firewall Operator using the web console += Installing the Ingress Node Firewall Operator using the web console -As a cluster administrator, you can install the Operator using the web console. +[role="_abstract"] +As a cluster administrator, you can install the Ingress Node Firewall Operator to enable node-level ingress firewalling by using the web console. .Prerequisites @@ -15,7 +16,6 @@ As a cluster administrator, you can install the Operator using the web console. .Procedure - . Install the Ingress Node Firewall Operator: .. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. 
diff --git a/modules/nw-infw-operator-rules-object.adoc b/modules/nw-infw-operator-rules-object.adoc index 41a4052b2df5..38d3dd226f23 100644 --- a/modules/nw-infw-operator-rules-object.adoc +++ b/modules/nw-infw-operator-rules-object.adoc @@ -2,10 +2,13 @@ // // * networking/ingress-node-firewall-operator.adoc -:_mod-docs-content-type: CONCEPT +:_mod-docs-content-type: REFERENCE [id="nw-ingress-node-firewall-operator-rules-object_{context}"] = Ingress Node Firewall rules object +[role="_abstract"] +You can review rule fields and examples to define which ingress traffic is allowed or denied by using the Ingress Node Firewall rules object. + The fields for the Ingress Node Firewall rules object are described in the following table: .Ingress Node Firewall rules object @@ -81,7 +84,7 @@ spec: - eth0 nodeSelector: matchLabels: - : <1> + : ingress: - sourceCIDRs: - 172.16.0.0/12 @@ -108,14 +111,16 @@ spec: icmpType: 128 #ICMPV6 Echo request action: Deny ---- -<1> A and a must exist on the node and must match the `nodeselector` label and value applied to the nodes you want the `ingressfirewallconfig` CR to run on. The can be `true` or `false`. By using `nodeSelector` labels, you can target separate groups of nodes to apply different rules to using the `ingressfirewallconfig` CR. ++ +A `` and a `` must exist on the node and must match the `nodeselector` label and value applied to the nodes you want the `ingressfirewallconfig` CR to run on. The `` can be `true` or `false`. By using `nodeSelector` labels, you can target separate groups of nodes to apply different rules to using the `ingressfirewallconfig` CR. + [id="nw-ingress-node-firewall-zero-trust-example-cr_{context}"] == Zero trust Ingress Node Firewall rules object example Zero trust Ingress Node Firewall rules can provide additional security to multi-interface clusters. For example, you can use zero trust Ingress Node Firewall rules to drop all traffic on a specific interface except for SSH. 
-A complete configuration of a zero trust Ingress Node Firewall rule set is specified in the following example: +A complete configuration of a zero trust Ingress Node Firewall rule for a network-interface cluster is specified in the following example: [IMPORTANT] ==== @@ -131,13 +136,13 @@ metadata: name: ingressnodefirewall-zero-trust spec: interfaces: - - eth1 <1> + - eth1 nodeSelector: matchLabels: - : <2> + : ingress: - sourceCIDRs: - - 0.0.0.0/0 <3> + - 0.0.0.0/0 rules: - order: 10 protocolConfig: @@ -146,9 +151,5 @@ spec: ports: 22 action: Allow - order: 20 - action: Deny <4> ----- -<1> Network-interface cluster -<2> The and needs to match the `nodeSelector` label and value applied to the specific nodes with which you wish to apply the `ingressfirewallconfig` CR. -<3> `0.0.0.0/0` set to match any CIDR -<4> `action` set to `Deny` + action: Deny +---- \ No newline at end of file diff --git a/modules/nw-infw-operator-troubleshooting.adoc b/modules/nw-infw-operator-troubleshooting.adoc index 39eae22026e5..9003b1b9b171 100644 --- a/modules/nw-infw-operator-troubleshooting.adoc +++ b/modules/nw-infw-operator-troubleshooting.adoc @@ -6,6 +6,11 @@ [id="nw-infw-operator-troubleshooting_{context}"] = Troubleshooting the Ingress Node Firewall Operator +[role="_abstract"] +You can verify the status and view the logs to diagnose ingress firewall deployment or rule issues. + +.Procedure + * Run the following command to list installed Ingress Node Firewall custom resource definitions (CRD): + [source,terminal] diff --git a/modules/nw-infw-operator-viewing.adoc b/modules/nw-infw-operator-viewing.adoc index 6d3671c30a3e..976ee74c8aea 100644 --- a/modules/nw-infw-operator-viewing.adoc +++ b/modules/nw-infw-operator-viewing.adoc @@ -6,6 +6,9 @@ [id="nw-infw-operator-viewing_{context}"] = Viewing Ingress Node Firewall Operator rules +[role="_abstract"] +Inspect existing rules and configs to confirm the firewall is applied as intended. + .Procedure . 
Run the following command to view all current rules : diff --git a/modules/viewing-stats-collected-kubernetes-nmtate-op.adoc b/modules/viewing-stats-collected-kubernetes-nmtate-op.adoc index f13493fbf8cc..6f9354569703 100644 --- a/modules/viewing-stats-collected-kubernetes-nmtate-op.adoc +++ b/modules/viewing-stats-collected-kubernetes-nmtate-op.adoc @@ -6,7 +6,8 @@ [id="viewing-stats-collected-kubernetes-nmtate-op_{context}"] = Viewing metrics collected by the Kubernetes NMState Operator -The Kubernetes NMState Operator, `kubernetes-nmstate-operator`, can collect metrics from the `kubernetes_nmstate_features_applied` component and expose them as ready-to-use metrics. As a use case for viewing metrics, consider a situation where you created a `NodeNetworkConfigurationPolicy` custom resource and you want to confirm that the policy is active. +[role="_abstract"] +The Kubernetes NMState Operator, `kubernetes-nmstate-operator`, can collect metrics from the `kubernetes_nmstate_features_applied` component and expose them as ready-to-use metrics. As a use case for viewing metrics, consider a situation where you created a `NodeNetworkConfigurationPolicy` custom resource (CR) and you want to confirm that the policy is active. [NOTE] ==== diff --git a/networking/networking_operators/dns-operator.adoc b/networking/networking_operators/dns-operator.adoc index c1a23814e272..8469a1f70f4b 100644 --- a/networking/networking_operators/dns-operator.adoc +++ b/networking/networking_operators/dns-operator.adoc @@ -6,6 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] +[role="_abstract"] In {product-title}, the DNS Operator deploys and manages a CoreDNS instance to provide a name resolution service to pods inside the cluster, enables DNS-based Kubernetes Service discovery, and resolves internal `cluster.local` names. 
ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] @@ -36,9 +37,6 @@ include::modules/nw-dns-operatorloglevel.adoc[leveloffset=+1] include::modules/nw-dns-cache-tuning.adoc[leveloffset=+1] -[id="dns-operator-advanced-tasks"] -== Advanced tasks - include::modules/nw-dns-operator-managementState.adoc[leveloffset=+2] include::modules/nw-controlling-dns-pod-placement.adoc[leveloffset=+2] diff --git a/networking/networking_operators/ingress-node-firewall-operator.adoc b/networking/networking_operators/ingress-node-firewall-operator.adoc index 4514a5c5c207..f2d5ac8df6ba 100644 --- a/networking/networking_operators/ingress-node-firewall-operator.adoc +++ b/networking/networking_operators/ingress-node-firewall-operator.adoc @@ -6,6 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] +[role="_abstract"] The Ingress Node Firewall Operator provides a stateless, eBPF-based firewall for managing node-level ingress traffic in {product-title}. include::modules/nw-infw-operator-cr.adoc[leveloffset=+1]