diff --git a/.changes/unreleased/operator-Added-20260508-140411.yaml b/.changes/unreleased/operator-Added-20260508-140411.yaml new file mode 100644 index 000000000..b459dd6a5 --- /dev/null +++ b/.changes/unreleased/operator-Added-20260508-140411.yaml @@ -0,0 +1,4 @@ +project: operator +kind: Added +body: Added `rpk-k8s multicluster bundle` for collecting cross-cluster operator diagnostics (per-peer pod, deployment, TLS, raft status, logs, multi-sample `/metrics`) into a single zip. Discovers peers from labelled kubeconfig cache Secrets given any one peer's kubeconfig. Includes a chart-level binding of the operator ServiceAccount to the `metrics-reader` ClusterRole so the bundle (and the existing ServiceMonitor) can scrape `/metrics` without 403. +time: 2026-05-08T14:04:11.47156921+02:00 diff --git a/licenses/third_party.md b/licenses/third_party.md index 0b883e9e4..48427ca80 100644 --- a/licenses/third_party.md +++ b/licenses/third_party.md @@ -75,6 +75,7 @@ run `task generate:third-party-licenses-list` | github.com/cyphar/filepath-securejoin | [MPL-2.0](https://github.com/cyphar/filepath-securejoin/blob/v0.6.1/COPYING.md) | | github.com/cyphar/filepath-securejoin | [BSD-3-Clause](https://github.com/cyphar/filepath-securejoin/blob/v0.6.1/COPYING.md) | | github.com/davecgh/go-spew/spew | [ISC](https://github.com/davecgh/go-spew/blob/d8f796af33cc/LICENSE) | +| github.com/docker/go-units | [Apache-2.0](https://github.com/docker/go-units/blob/v0.5.0/LICENSE) | | github.com/emicklei/go-restful/v3 | [MIT](https://github.com/emicklei/go-restful/blob/v3.13.0/LICENSE) | | github.com/evanphx/json-patch | [BSD-3-Clause](https://github.com/evanphx/json-patch/blob/v5.9.11/LICENSE) | | github.com/evanphx/json-patch/v5 | [BSD-3-Clause](https://github.com/evanphx/json-patch/blob/v5.9.11/v5/LICENSE) | diff --git a/operator/chart/rbac.go b/operator/chart/rbac.go index 5031b11bf..8a247a211 100644 --- a/operator/chart/rbac.go +++ b/operator/chart/rbac.go @@ -166,8 +166,40 @@ func ClusterRoleBindings(dot *helmette.Dot) []rbacv1.ClusterRoleBinding { return nil } - // NB: We skip over making a binding for the metrics viewer role. - var bindings []rbacv1.ClusterRoleBinding + // Bind the operator's own ServiceAccount to the metrics-reader + // ClusterRole emitted in ClusterRoles. controller-runtime's metrics + // server enforces authentication + authorization by default, so + // anything authenticating as the operator SA — including the bundled + // ServiceMonitor scraping with the pod's projected token, and tools + // like `rpk k8s multicluster bundle` — needs `nonResourceURLs: + // /metrics get`. Other consumers (e.g. an external Prometheus running + // under its own SA) can bind to the same ClusterRole separately. 
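+	//
+	// A quick way to sanity-check the grant on a live cluster
+	// (illustrative; the namespace and ServiceAccount name below are
+	// example values, not chart output):
+	//
+	//	kubectl auth can-i get /metrics \
+	//	  --as=system:serviceaccount:default:operator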
+ metricsRoleName := cleanForK8sWithSuffix(Fullname(dot)+"-"+dot.Release.Namespace, "metrics-reader") + bindings := []rbacv1.ClusterRoleBinding{ + { + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRoleBinding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: metricsRoleName, + Labels: Labels(dot), + Annotations: values.Annotations, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: metricsRoleName, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: ServiceAccountName(dot), + Namespace: dot.Release.Namespace, + }, + }, + }, + } for _, bundle := range rbacBundles(dot) { if !bundle.Enabled { continue diff --git a/operator/chart/templates/_rbac.go.tpl b/operator/chart/templates/_rbac.go.tpl index 9cde9d0e6..f95eac695 100644 --- a/operator/chart/templates/_rbac.go.tpl +++ b/operator/chart/templates/_rbac.go.tpl @@ -61,7 +61,8 @@ {{- (dict "r" (coalesce nil)) | toJson -}} {{- break -}} {{- end -}} -{{- $bindings := (coalesce nil) -}} +{{- $metricsRoleName := (get (fromJson (include "operator.cleanForK8sWithSuffix" (dict "a" (list (printf "%s%s" (printf "%s%s" (get (fromJson (include "operator.Fullname" (dict "a" (list $dot)))) "r") "-") $dot.Release.Namespace) "metrics-reader")))) "r") -}} +{{- $bindings := (list (mustMergeOverwrite (dict "metadata" (dict) "roleRef" (dict "apiGroup" "" "kind" "" "name" "")) (mustMergeOverwrite (dict) (dict "apiVersion" "rbac.authorization.k8s.io/v1" "kind" "ClusterRoleBinding")) (dict "metadata" (mustMergeOverwrite (dict) (dict "name" $metricsRoleName "labels" (get (fromJson (include "operator.Labels" (dict "a" (list $dot)))) "r") "annotations" $values.annotations)) "roleRef" (mustMergeOverwrite (dict "apiGroup" "" "kind" "" "name" "") (dict "apiGroup" "rbac.authorization.k8s.io" "kind" "ClusterRole" "name" $metricsRoleName)) "subjects" (list (mustMergeOverwrite (dict "kind" "" "name" "") (dict "kind" "ServiceAccount" "name" (get (fromJson (include "operator.ServiceAccountName" (dict "a" (list $dot)))) "r") "namespace" $dot.Release.Namespace)))))) -}} {{- range $_, $bundle := (get (fromJson (include "operator.rbacBundles" (dict "a" (list $dot)))) "r") -}} {{- if (not $bundle.Enabled) -}} {{- continue -}} diff --git a/operator/chart/testdata/template-cases.golden.txtar b/operator/chart/testdata/template-cases.golden.txtar index 262d101f3..bdcb0df4a 100644 --- a/operator/chart/testdata/template-cases.golden.txtar +++ b/operator/chart/testdata/template-cases.golden.txtar @@ -660,6 +660,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -2036,6 +2057,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + 
app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: ctFdYbtL + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: XCatYW-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: XCatYW-default-metrics-reader +subjects: +- kind: ServiceAccount + name: XCatYW + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -3459,6 +3501,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: 8zwp + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: do1u-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: do1u-default-metrics-reader +subjects: +- kind: ServiceAccount + name: do1u + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -5012,6 +5075,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: llRXnk1 + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: zFkw3t-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: zFkw3t-default-metrics-reader +subjects: +- kind: ServiceAccount + name: zFkw3t + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -6709,6 +6793,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: Od + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: 8-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 8-default-metrics-reader +subjects: +- kind: ServiceAccount + name: "8" + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -8103,6 +8208,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: bA9B + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: TODeGK-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: TODeGK-default-metrics-reader +subjects: +- kind: ServiceAccount + name: TODeGK + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -9623,6 +9749,27 @@ rules: # Source: operator/templates/entry-point.yaml 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: 9Fewiv + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: hzsFR-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: hzsFR-default-metrics-reader +subjects: +- kind: ServiceAccount + name: hzsFR + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -11224,6 +11371,29 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + 7eZwch: p + v: fkhSZu + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: HsM6 + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: nwn-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nwn-default-metrics-reader +subjects: +- kind: ServiceAccount + name: EWzZWPEbF + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: 7eZwch: p @@ -12634,6 +12804,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: UMeCRE + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: 5rz4H-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 5rz4H-default-metrics-reader +subjects: +- kind: ServiceAccount + name: 5rz4H + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -14038,6 +14229,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: rrM2 + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: EB-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: EB-default-metrics-reader +subjects: +- kind: ServiceAccount + name: 43f9qCQ + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -15317,6 +15529,29 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + k1: MeUXOg3 + yXu: iPV0Z + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: 2DsVHG + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: tP-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tP-default-metrics-reader +subjects: +- kind: ServiceAccount + name: xVPYSkZt2 + namespace: default +--- +# Source: 
operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: k1: MeUXOg3 @@ -16727,6 +16962,28 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + kbD: UEcwv0 + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: D4Ec + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: PcH5-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: PcH5-default-metrics-reader +subjects: +- kind: ServiceAccount + name: PcH5 + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: kbD: UEcwv0 @@ -18313,6 +18570,30 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + B: grOgtF + Pr: RBVP + QVzkdX7Y1I2hjVAXY: h + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: 6gRngW4 + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-6gRngW4-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-6gRngW4-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator-6gRngW4 + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -19776,6 +20057,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: au3C7-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: au3C7-default-metrics-reader +subjects: +- kind: ServiceAccount + name: 9lb + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -21208,6 +21510,28 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + 1jQ9Wuut: FnhwzZlPE + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: M + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: w4N-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: w4N-default-metrics-reader +subjects: +- kind: ServiceAccount + name: wZ2 + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: 1jQ9Wuut: FnhwzZlPE @@ -22625,6 +22949,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: wDZRv + app.kubernetes.io/version: v26.2.1-beta.1 + 
helm.sh/chart: operator-26.2.1-beta.1 + name: EW2WAAJK-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: EW2WAAJK-default-metrics-reader +subjects: +- kind: ServiceAccount + name: EW2WAAJK + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -24398,6 +24743,30 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + B6AmPaZ: Usluk + W7aCzkZu: pID + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: uRLJ1j + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + tCcCCzCLW: D + name: YKWWF-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: YKWWF-default-metrics-reader +subjects: +- kind: ServiceAccount + name: YKWWF + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -26022,6 +26391,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: 3RQJ3j + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: w-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: w-default-metrics-reader +subjects: +- kind: ServiceAccount + name: w + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -27622,6 +28012,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: mBH + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: t-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: t-default-metrics-reader +subjects: +- kind: ServiceAccount + name: "" + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -29557,6 +29968,33 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + "": YQe + DT: TQ2bLJESBNS + JECbiE5z: 7GsL + labels: + "": 9e4vIgl + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: Keyw50T + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + nuj9z: 0teXTWiM + t6G2ZvuAcFNG: Fe773 + name: 3TCSa8p9-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 3TCSa8p9-default-metrics-reader +subjects: +- kind: ServiceAccount + name: qBAL + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: "": YQe @@ -31264,6 +31702,28 @@ rules: # Source: 
operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + HB: 6Bh9uW + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: qV6xrD + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: 4Y-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 4Y-default-metrics-reader +subjects: +- kind: ServiceAccount + name: 9OwQUe + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: HB: 6Bh9uW @@ -33069,6 +33529,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: QU1RdnSl5WDZjW + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: 6o-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 6o-default-metrics-reader +subjects: +- kind: ServiceAccount + name: I + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -34503,6 +34984,29 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + IH: S4ywxLg + SVbJKOD: M6NY + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: su3 + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: ISOeWop-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ISOeWop-default-metrics-reader +subjects: +- kind: ServiceAccount + name: ISOeWop + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: IH: S4ywxLg @@ -36577,6 +37081,30 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + 0qwEw: sSE57 + PK22WJ: MB + ykqJNdBG: 8uR6z5RvKA + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: mXKX + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: J8-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: J8-default-metrics-reader +subjects: +- kind: ServiceAccount + name: "" + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: 0qwEw: sSE57 @@ -38756,6 +39284,28 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + "": YeheDN + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: nfr7ZK + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: laYV0ze-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: laYV0ze-default-metrics-reader +subjects: +- kind: 
ServiceAccount + name: WGtRG + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: "": YeheDN @@ -40406,6 +40956,29 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + dj4k3Kh: OZtu + wy272: qoP + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: TTQ + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: iYPCfHtZuTxx-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: iYPCfHtZuTxx-default-metrics-reader +subjects: +- kind: ServiceAccount + name: LTY4w + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: dj4k3Kh: OZtu @@ -42031,6 +42604,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: HMyl + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: YGu-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: YGu-default-metrics-reader +subjects: +- kind: ServiceAccount + name: YGu + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -44523,6 +45117,29 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + "": f2I + b2WJ: b4O4Tjy + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: txo + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: Ipfd-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: Ipfd-default-metrics-reader +subjects: +- kind: ServiceAccount + name: qYiN + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: "": f2I @@ -46283,6 +46900,30 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + CxqPaEyk: fINHg + T5cgPilynoaoF: "" + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: pcJ + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + x: 6P + name: cwgT-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cwgT-default-metrics-reader +subjects: +- kind: ServiceAccount + name: PJFo + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -48037,6 +48678,29 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + G: 2P + btoGgD: 7n + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/name: xN + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: OpA-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: OpA-default-metrics-reader +subjects: +- kind: ServiceAccount + name: E9qW5z + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: G: 2P @@ -51468,6 +52132,31 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + lPyTQTQGT: n0PMo + xckN1ys: w + labels: + 0KxBSMaPK4: m3uQe + UBlzk: ah7sm1KDTy5 + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: tSP + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: w3Ea-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: w3Ea-default-metrics-reader +subjects: +- kind: ServiceAccount + name: CWrpD + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: lPyTQTQGT: n0PMo @@ -54401,6 +55090,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: TR7et + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: 04o0-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 04o0-default-metrics-reader +subjects: +- kind: ServiceAccount + name: uI + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -57956,6 +58666,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: XR3QTg + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: wE4Rkh-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: wE4Rkh-default-metrics-reader +subjects: +- kind: ServiceAccount + name: j7z + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -60022,6 +60753,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: KG9Ly + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: Tdd-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: Tdd-default-metrics-reader +subjects: +- kind: ServiceAccount + name: X + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -62633,6 +63385,31 @@ rules: # Source: 
operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + 1m7aEPW: AFIshsK4y + L5ZjzgQXy: mCwoDQVU + labels: + 8WPzLP: t + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: R6F1vlUXkJ + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + nYQL: TIi + name: qGsQ1sX-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: qGsQ1sX-default-metrics-reader +subjects: +- kind: ServiceAccount + name: Q + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: 1m7aEPW: AFIshsK4y @@ -65198,6 +65975,30 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + 6t8EC: hh0EBBW0 + 40GBab: t0Z5V8Idb + "97": ysLUJXn4i + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: 2xmeT + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: 6z-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 6z-default-metrics-reader +subjects: +- kind: ServiceAccount + name: zMBGxjU + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -67274,6 +68075,32 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + AkL: gYS24 + S5EU2LQ: Khc + gbAz: a + labels: + 1D2wB0v: vTS5 + D1Nq: i8 + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: VyRQrGp + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: Zl-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: Zl-default-metrics-reader +subjects: +- kind: ServiceAccount + name: Yv9IwD + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: AkL: gYS24 @@ -69490,6 +70317,31 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + VL: TMl + eGwp: ys7M + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: S + app.kubernetes.io/version: v26.2.1-beta.1 + djhp: dDxn5K + helm.sh/chart: operator-26.2.1-beta.1 + j7k: M0STNp + name: X5ghd-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: X5ghd-default-metrics-reader +subjects: +- kind: ServiceAccount + name: gnFwZz3 + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: VL: TMl @@ -71600,6 +72452,32 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: + FS0CCFre54m: 58hk3 + zeZ1yo: BlUU + labels: + "6": nwe + SNwt: nldfORk + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: bVx + app.kubernetes.io/version: v26.2.1-beta.1 + 
helm.sh/chart: operator-26.2.1-beta.1 + zrdLi6qjyStw: s6wqn + name: ZvF-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ZvF-default-metrics-reader +subjects: +- kind: ServiceAccount + name: ynJU + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: FS0CCFre54m: 58hk3 @@ -73608,6 +74486,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -75118,6 +76017,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -76627,6 +77547,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -78002,6 +78943,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -79394,6 +80356,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: 
rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -80769,6 +81752,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -82144,6 +83148,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -83527,6 +84552,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -84920,6 +85966,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: 
operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -86496,6 +87563,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -88127,6 +89215,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -89752,6 +90861,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -91310,6 +92440,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -92850,6 +94001,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: 
operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: @@ -94356,6 +95528,27 @@ rules: # Source: operator/templates/entry-point.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.2.1-beta.1 + helm.sh/chart: operator-26.2.1-beta.1 + name: operator-default-metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default-metrics-reader +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: annotations: {} labels: diff --git a/operator/cmd/rpk-k8s/k8s/multicluster/bundle.go b/operator/cmd/rpk-k8s/k8s/multicluster/bundle.go new file mode 100644 index 000000000..f657766f0 --- /dev/null +++ b/operator/cmd/rpk-k8s/k8s/multicluster/bundle.go @@ -0,0 +1,536 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package multicluster + +import ( + "context" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/docker/go-units" + "github.com/spf13/cobra" + + "github.com/redpanda-data/redpanda-operator/operator/cmd/rpk-k8s/k8s/multicluster/checks" +) + +// BundleConfig drives `rpk k8s multicluster bundle`. +// +// Default mode is single-starting-cluster: the user provides a kubeconfig +// (or a single context name) and the bundle command discovers peer clusters +// from labelled cache Secrets stored on that cluster by the operator's +// raft-bootstrap flow. This makes the tool usable when one of the peer +// clusters is down — pick any reachable one as the starting point and the +// rest of the roster falls out of the apiserver you can reach. +// +// Multi-context mode is also supported: pass --context multiple times (or +// pre-populate Connection.Connections in tests) to bypass discovery and +// diagnose exactly the listed clusters. +type BundleConfig struct { + // Connection drives kubeconfig / context resolution. Reused from the + // `status` and `bootstrap` commands. + Connection ConnectionConfig + // OutputPath is the destination zip file. Empty means + // ./operator-bundle-.zip in the current working directory. + OutputPath string + // IncludePrivateKeys disables the default redaction of TLS private keys + // and cached peer kubeconfigs in serialised Secrets. Off by default + // because Support tickets often involve emailing the bundle around. + IncludePrivateKeys bool + + // SkipLogs disables operator pod log collection. + SkipLogs bool + // LogsSizeLimit is a human-readable byte limit for per-container logs + // (e.g. "5M", "10MiB", "1G"). "0" disables the cap. Empty string means + // the default ("5M"). Parsed once in Run via go-units. + LogsSizeLimit string + // LogsTailLines is the per-container tail-line cap. 
0 means use the + // default (5000). Negative values disable the cap. + LogsTailLines int64 + + // SkipMetrics disables operator metrics collection. + SkipMetrics bool + // MetricsSamples is the number of /metrics scrape samples taken per + // cluster (>= 2). The first sample is taken immediately; subsequent + // samples are taken after MetricsInterval. Capturing multiple samples + // lets an investigator compute counter rates post-hoc without needing + // access to a live Prometheus. 0 means use the default (2). Mirrors + // `rpk debug bundle --metrics-samples`. + MetricsSamples int + // MetricsInterval is the wall-clock interval between successive + // /metrics samples. 0 means use the default (10s). Must be > 0 when + // MetricsSamples >= 2. + MetricsInterval time.Duration + + // Verbose, when true, makes Run print a one-line progress message to + // ProgressOut (defaults to os.Stderr) at every major step: + // connection resolution, peer discovery, each cluster check, log + // collection, metrics scrape. Intended for diagnosing slow runs and + // hangs — leave off for clean machine-readable invocations. + Verbose bool + // ProgressOut is where Verbose progress lines go. Nil means + // os.Stderr; tests can capture by setting this to a buffer. + ProgressOut io.Writer + + // ClusterChecks lets callers override the per-cluster check list. Nil + // means use defaultClusterChecks (the same set the `status` command + // runs). Tests can pass a smaller list to avoid checks whose timeouts + // would dominate test wall-clock time. + ClusterChecks []checks.ClusterCheck + // CrossClusterChecks lets callers override the cross-cluster check + // list. Nil means use defaultCrossClusterChecks. + CrossClusterChecks []checks.CrossClusterCheck + // LogFetcherFor returns the log fetcher used for a given cluster + // connection. Nil means build a kubernetes.Interface-backed fetcher + // from the connection's REST config. Tests use this to inject a stub + // because envtest cannot return real container logs. + LogFetcherFor func(ClusterConnection) (logFetcher, error) + // MetricsFetcherFor returns the metrics fetcher used for a given + // cluster connection. Nil means build a kubernetes.Interface-backed + // fetcher (apiserver pod-proxy) from the connection's REST config. + // Tests inject a stub because envtest doesn't run a real metrics + // server. + MetricsFetcherFor func(ClusterConnection) (metricsFetcher, error) + + // Now overrides the clock for deterministic output in tests. Defaults + // to time.Now. + Now func() time.Time +} + +// BundleResult exposes the aggregate state collected during a bundle run for +// programmatic inspection (mirrors StatusResult). +type BundleResult struct { + // Path is the zip file the bundle was written to. Empty when Run wrote + // to a non-file io.Writer. + Path string + // Contexts holds the per-cluster CheckContext after all checks ran. + Contexts []*checks.CheckContext + // ClusterResults[i] are the per-cluster check Results for Contexts[i]. + ClusterResults [][]checks.Result + // CrossResults are the cross-cluster check Results. + CrossResults []checks.Result + // Errors collects non-fatal issues encountered during the run, in the + // same form they're written to errors.txt inside the bundle. + Errors []string +} + +// BindFlags registers the bundle command flags on cmd.
+func (c *BundleConfig) BindFlags(cmd *cobra.Command) { + c.Connection.BindFlags(cmd) + cmd.Flags().StringVarP(&c.OutputPath, "output", "o", "", + "Path to write the bundle zip (default ./operator-bundle-.zip)") + cmd.Flags().BoolVar(&c.IncludePrivateKeys, "include-private-keys", false, + "Include TLS private keys and cached peer kubeconfigs in serialised Secrets. Off by default.") + cmd.Flags().BoolVar(&c.SkipLogs, "skip-logs", false, + "Skip operator pod log collection") + cmd.Flags().StringVar(&c.LogsSizeLimit, "logs-size-limit", "5M", + `Per-container log byte cap (human-readable, e.g. "5M", "10MiB", "1G"; "0" disables the cap)`) + cmd.Flags().Int64Var(&c.LogsTailLines, "logs-tail-lines", 5000, + "Per-container log tail-line cap (negative disables the cap)") + cmd.Flags().BoolVar(&c.SkipMetrics, "skip-metrics", false, + "Skip operator /metrics scrape") + cmd.Flags().IntVar(&c.MetricsSamples, "metrics-samples", defaultMetricsSamples, + "Number of /metrics samples taken per cluster (>= 2). Lets investigators compute counter rates post-hoc.") + cmd.Flags().DurationVar(&c.MetricsInterval, "metrics-interval", defaultMetricsInterval, + "Interval between successive /metrics samples (e.g. 10s, 1m). Must be > 0.") + cmd.Flags().BoolVarP(&c.Verbose, "verbose", "v", false, + "Print per-step progress to stderr (use to diagnose slow runs / hangs)") +} + +// Run executes the bundle pipeline and writes the resulting zip to w. Returns +// a BundleResult for programmatic inspection. Per-cluster collection failures +// are recorded in BundleResult.Errors (and in errors.txt inside the zip) +// rather than returned, so a single bad cluster doesn't lose the whole +// bundle. Errors that prevent any output (e.g. resolving connections) are +// returned. +func (c *BundleConfig) Run(ctx context.Context, w io.Writer) (*BundleResult, error) { + logf := c.progressLogger() + + // Validate flag-derived options up front so a bad --logs-size-limit / + // --metrics-samples / --metrics-interval fails before any cluster + // discovery or remote calls happen — saves the user a minute of + // connection work to find out they typo'd a flag. + logsOpts, lerr := c.resolveLogsOptions() + if lerr != nil { + return nil, lerr + } + metricsOpts, merr := c.resolveMetricsOptions() + if merr != nil { + return nil, merr + } + + logf("resolving starting connections (kubeconfig=%q, contexts=%v)", + c.Connection.Kubeconfig, c.Connection.Contexts) + starting, err := c.Connection.Resolve() + if err != nil { + return nil, fmt.Errorf("resolving starting connection: %w", err) + } + if len(starting) == 0 { + return nil, fmt.Errorf("no starting connections resolved; pass --kubeconfig or --context") + } + logf("resolved %d starting connection(s)", len(starting)) + + roster := make([]ClusterConnection, 0, len(starting)) + roster = append(roster, starting...) + var errs []string + + // Single-starting-cluster mode: discover peers from cache Secrets on + // the one cluster the user gave us. Multi-context mode bypasses + // discovery (the user is asserting the roster). + if len(starting) == 1 { + logf("discovering peers from labelled cache Secrets in namespace %q on %q", + c.Connection.Namespace, starting[0].Name) + discovery, derr := discoverPeers(ctx, starting[0].Ctl, c.Connection.Namespace) + if derr != nil { + errs = append(errs, fmt.Sprintf("peer discovery failed: %v. The bundle covers only the starting cluster.", derr)) + logf("peer discovery failed: %v (continuing with starting cluster only)", derr) + } + errs = append(errs, discovery.Warnings...)
+ peers := dedupeBySelf(discovery.Connections, starting[0].Name) + for _, p := range peers { + logf(" discovered peer %q", p.Name) + } + for _, w := range discovery.Warnings { + logf(" discovery warning: %s", w) + } + roster = append(roster, peers...) + } else { + logf("multi-context mode: skipping discovery, using %d explicit cluster(s)", len(starting)) + } + logf("roster has %d cluster(s) total", len(roster)) + + bw := newBundleWriter(w) + defer bw.Close() + + clusterChecks := c.ClusterChecks + if clusterChecks == nil { + clusterChecks = defaultClusterChecks + } + crossClusterChecks := c.CrossClusterChecks + if crossClusterChecks == nil { + crossClusterChecks = defaultCrossClusterChecks + } + + contexts := make([]*checks.CheckContext, len(roster)) + clusterResults := make([][]checks.Result, len(roster)) + for i, conn := range roster { + cc := &checks.CheckContext{ + Context: conn.Name, + Namespace: c.Connection.Namespace, + ServiceName: c.Connection.ServiceName, + SecretPrefix: conn.SecretPrefix, + Ctl: conn.Ctl, + } + contexts[i] = cc + // Inline what RunClusterChecks does so we can log per-check + // timing — invaluable for diagnosing which check hung when an + // invocation appears stuck. + var results []checks.Result + for _, check := range clusterChecks { + logf("[%s] running check %q", cc.Context, check.Name()) + start := time.Now() + results = append(results, check.Run(ctx, cc)...) + logf("[%s] check %q completed in %s", cc.Context, check.Name(), time.Since(start).Round(time.Millisecond)) + } + clusterResults[i] = results + } + names := make([]string, 0, len(crossClusterChecks)) + for _, cross := range crossClusterChecks { + names = append(names, cross.Name()) + } + logf("running %d cross-cluster check(s): %v", len(crossClusterChecks), names) + crossStart := time.Now() + crossResults := checks.RunCrossClusterChecks(contexts, crossClusterChecks) + logf("cross-cluster checks completed in %s", time.Since(crossStart).Round(time.Millisecond)) + + now := time.Now + if c.Now != nil { + now = c.Now + } + logf("writing manifest.json and status.txt") + if err := bw.writeManifestFile(c, contexts, now().UTC(), logsOpts, metricsOpts); err != nil { + errs = append(errs, fmt.Sprintf("writing manifest.json: %v", err)) + } + if err := bw.writeStatusTable(contexts, clusterResults, crossResults); err != nil { + errs = append(errs, fmt.Sprintf("writing status.txt: %v", err)) + } + for i, cc := range contexts { + logf("[%s] serialising check artifacts (pod, deployment, TLS, raft-status)", cc.Context) + for _, e := range bw.writeClusterArtifacts(cc, clusterResults[i], c.IncludePrivateKeys) { + errs = append(errs, fmt.Sprintf("cluster %s: %v", cc.Context, e)) + } + } + + // Phase 2: per-cluster operator pod logs. Skipped when --skip-logs is + // set or when the per-cluster PodCheck didn't find a pod (cc.Pod is + // nil). Per-container failures are recorded in errors.txt; the bundle + // still completes. 
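+	//
+	// For orientation, the finished zip ends up with roughly this layout
+	// (the per-cluster entry is illustrative; the exact paths come from
+	// the bundleWriter helpers):
+	//
+	//	manifest.json
+	//	status.txt
+	//	<cluster>/...             per-cluster check artifacts, logs, metrics
+	//	cross-cluster/checks.json
+	//	errors.txt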
+	if !c.SkipLogs {
+		for i, conn := range roster {
+			logf("[%s] collecting operator pod logs", conn.Name)
+			start := time.Now()
+			fetcher, ferr := c.logFetcherFor(conn)
+			if ferr != nil {
+				errs = append(errs, fmt.Sprintf("cluster %s: building log fetcher: %v", conn.Name, ferr))
+				logf("[%s] log fetcher build failed: %v", conn.Name, ferr)
+				continue
+			}
+			for _, e := range collectClusterLogs(ctx, bw, contexts[i], fetcher, logsOpts) {
+				errs = append(errs, fmt.Sprintf("cluster %s: %v", conn.Name, e))
+			}
+			logf("[%s] log collection done in %s", conn.Name, time.Since(start).Round(time.Millisecond))
+		}
+	} else {
+		logf("--skip-logs set, not collecting operator pod logs")
+	}
+
+	// Phase 3: per-cluster operator /metrics scrape via a port-forward to
+	// the pod's metrics port (see kubeMetricsFetcher for why the apiserver
+	// pod-proxy is not used). Skipped when --skip-metrics is set, when
+	// PodCheck didn't find a pod, or when the operator was deployed without
+	// --metrics-bind-address. Scrape failures are recorded in errors.txt
+	// and don't fail the bundle.
+	if !c.SkipMetrics {
+		for i, conn := range roster {
+			logf("[%s] scraping /metrics (%d sample(s) at %s interval)",
+				conn.Name, metricsOpts.Samples, metricsOpts.Interval)
+			start := time.Now()
+			fetcher, ferr := c.metricsFetcherFor(conn)
+			if ferr != nil {
+				errs = append(errs, fmt.Sprintf("cluster %s: building metrics fetcher: %v", conn.Name, ferr))
+				logf("[%s] metrics fetcher build failed: %v", conn.Name, ferr)
+				continue
+			}
+			for _, e := range collectClusterMetrics(ctx, bw, contexts[i], fetcher, metricsOpts, logf) {
+				errs = append(errs, fmt.Sprintf("cluster %s: %v", conn.Name, e))
+			}
+			logf("[%s] metrics scrape done in %s", conn.Name, time.Since(start).Round(time.Millisecond))
+		}
+	} else {
+		logf("--skip-metrics set, not scraping /metrics")
+	}
+	if err := bw.writeCrossClusterArtifacts(crossResults); err != nil {
+		errs = append(errs, fmt.Sprintf("writing cross-cluster/checks.json: %v", err))
+	}
+	if err := bw.writeErrors(errs); err != nil {
+		// errors.txt itself failed to write — surface to caller.
+		return nil, fmt.Errorf("writing errors.txt: %w", err)
+	}
+
+	return &BundleResult{
+		Contexts:       contexts,
+		ClusterResults: clusterResults,
+		CrossResults:   crossResults,
+		Errors:         errs,
+	}, nil
+}
+
+// progressLogger returns a printf-style function used by Run to report
+// per-step progress. When Verbose is false the returned closure is a no-op
+// (callers still pay to pack the variadic args, but no formatting or I/O
+// happens).
+func (c *BundleConfig) progressLogger() func(format string, args ...any) {
+	if !c.Verbose {
+		return func(string, ...any) {}
+	}
+	out := c.ProgressOut
+	if out == nil {
+		out = os.Stderr
+	}
+	return func(format string, args ...any) {
+		fmt.Fprintf(out, "[bundle %s] "+format+"\n",
+			append([]any{time.Now().Format("15:04:05.000")}, args...)...)
+	}
+}
+
+// dedupeBySelf removes any peer connection that has the same name as the
+// starting cluster. Defends against the (legitimate) case where the user's
+// own cache contains a self-entry; we take the starting connection as the
+// authoritative one.
+func dedupeBySelf(peers []ClusterConnection, selfName string) []ClusterConnection {
+	out := peers[:0]
+	for _, p := range peers {
+		if p.Name == selfName {
+			continue
+		}
+		out = append(out, p)
+	}
+	return out
+}
+
+// resolveLogsOptions parses LogsSizeLimit (a human-readable byte string) and
+// returns the LogsOptions used for log retrieval. Empty LogsSizeLimit yields
+// the package default (5 MiB / 5000 lines). Returns an error only when
+// LogsSizeLimit is set but unparseable — bad input deserves a clear failure
+// rather than a silently-large bundle.
+func (c *BundleConfig) resolveLogsOptions() (LogsOptions, error) {
+	defaults := defaultLogsOptions()
+	out := LogsOptions{TailLines: c.LogsTailLines}
+	if c.LogsTailLines == 0 {
+		out.TailLines = defaults.TailLines
+	} else if c.LogsTailLines < 0 {
+		out.TailLines = 0
+	}
+	switch c.LogsSizeLimit {
+	case "":
+		out.LimitBytes = defaults.LimitBytes
+	case "0":
+		out.LimitBytes = 0
+	default:
+		n, err := parseLogsSize(c.LogsSizeLimit)
+		if err != nil {
+			return LogsOptions{}, fmt.Errorf("--logs-size-limit %q: %w", c.LogsSizeLimit, err)
+		}
+		out.LimitBytes = n
+	}
+	return out, nil
+}
+
+// resolveMetricsOptions validates --metrics-samples and --metrics-interval and
+// returns a MetricsOptions ready for use by collectClusterMetrics. Mirrors
+// rpk debug bundle's validation: samples must be >= 2 (a single sample
+// loses the rate-of-change information that's the whole point of multiple
+// samples) and interval must be > 0.
+func (c *BundleConfig) resolveMetricsOptions() (MetricsOptions, error) {
+	samples := c.MetricsSamples
+	if samples == 0 {
+		samples = defaultMetricsSamples
+	}
+	if samples < 2 {
+		return MetricsOptions{}, fmt.Errorf("--metrics-samples must be >= 2, got %d", samples)
+	}
+	interval := c.MetricsInterval
+	if interval == 0 {
+		interval = defaultMetricsInterval
+	}
+	if interval <= 0 {
+		return MetricsOptions{}, fmt.Errorf("--metrics-interval must be > 0, got %s", interval)
+	}
+	return MetricsOptions{Samples: samples, Interval: interval}, nil
+}
+
+// parseLogsSize accepts both decimal SI suffixes ("5M", "10MB" → power-of-10)
+// and binary IEC suffixes ("5Mi", "10MiB" → power-of-1024), matching the
+// dual conventions users see in Kubernetes resource limits and in
+// `rpk debug bundle --logs-size-limit`. Routes binary forms through
+// units.RAMInBytes (which uses 1024-based units) and decimal forms through
+// units.FromHumanSize (1000-based) — RAMInBytes is not used for decimal
+// inputs because it would silently treat "5M" as 5 MiB.
+func parseLogsSize(s string) (int64, error) {
+	trimmed := strings.TrimSpace(s)
+	// IEC suffixes: anything ending in "i" (e.g. "5Mi") or "iB" (e.g. "5MiB").
+	if strings.HasSuffix(trimmed, "i") || strings.HasSuffix(trimmed, "iB") {
+		return units.RAMInBytes(trimmed)
+	}
+	return units.FromHumanSize(trimmed)
+}
+
+// logFetcherFor returns the logFetcher for a roster entry. When the caller
+// configured a custom factory (test injection), that factory is used; the
+// default builds a kubernetes.Interface-backed fetcher from the connection's
+// REST config.
+func (c *BundleConfig) logFetcherFor(conn ClusterConnection) (logFetcher, error) {
+	if c.LogFetcherFor != nil {
+		return c.LogFetcherFor(conn)
+	}
+	if conn.Ctl == nil {
+		return nil, fmt.Errorf("connection %q has no kube.Ctl", conn.Name)
+	}
+	return newKubeLogFetcher(conn.Ctl.RestConfig())
+}
+
+// metricsFetcherFor returns the metricsFetcher for a roster entry. When the
+// caller configured a custom factory (test injection), that factory is
+// used; the default builds a port-forward-backed fetcher from the
+// connection's kube.Ctl (see kubeMetricsFetcher).
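+//
+// Illustrative test-injection sketch (fakeMetricsFetcher lives in the test
+// files; the wiring shown here mirrors how the tests use the factory):
+//
+//	cfg.MetricsFetcherFor = func(ClusterConnection) (metricsFetcher, error) {
+//		return &fakeMetricsFetcher{body: []byte("example_total 1\n")}, nil
+//	}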
+func (c *BundleConfig) metricsFetcherFor(conn ClusterConnection) (metricsFetcher, error) { + if c.MetricsFetcherFor != nil { + return c.MetricsFetcherFor(conn) + } + if conn.Ctl == nil { + return nil, fmt.Errorf("connection %q has no kube.Ctl", conn.Name) + } + return newKubeMetricsFetcher(conn.Ctl) +} + +func bundleCommand() *cobra.Command { + var cfg BundleConfig + + cmd := &cobra.Command{ + Use: "bundle", + Short: "Collect a diagnostics bundle from a multicluster operator deployment", + Long: `Collects environment data from each Kubernetes cluster running the +Redpanda multicluster operator and packages it into a ZIP file for support. +This is the operator-side counterpart to 'rpk debug bundle', which collects +data from the Redpanda brokers themselves. + +By default, only one Kubernetes context is required. The bundle command +discovers peer clusters from labelled cache Secrets stored by the operator's +raft-bootstrap flow on the starting cluster, so the tool stays useful when +one of the peer clusters is down. Pass multiple --context flags to bypass +discovery and diagnose exactly that set.`, + Example: ` # Single context — discover peers from the starting cluster + rpk k8s multicluster bundle --kubeconfig /path/to/kubeconfig + + # Specific starting cluster from the default kubeconfig + rpk k8s multicluster bundle --context cluster-a + + # Bypass discovery and bundle the listed clusters directly + rpk k8s multicluster bundle --context cluster-a --context cluster-b --context cluster-c + + # Write to a specific path + rpk k8s multicluster bundle --context cluster-a -o /tmp/operator-bundle.zip`, + RunE: func(cmd *cobra.Command, args []string) error { + path, file, err := cfg.openOutput() + if err != nil { + return err + } + defer file.Close() + res, err := cfg.Run(cmd.Context(), file) + if err != nil { + return err + } + if res != nil { + res.Path = path + } + fmt.Fprintf(cmd.OutOrStdout(), "Operator bundle written to %s\n", path) + if len(res.Errors) > 0 { + fmt.Fprintf(cmd.OutOrStdout(), + "Bundle completed with %d non-fatal issue(s); see errors.txt inside the zip.\n", + len(res.Errors), + ) + } + return nil + }, + } + + cfg.BindFlags(cmd) + return cmd +} + +// openOutput resolves the output path (defaulting to a timestamped filename +// in the current working directory) and creates the file. The caller is +// responsible for closing the returned *os.File. +func (c *BundleConfig) openOutput() (string, *os.File, error) { + now := time.Now + if c.Now != nil { + now = c.Now + } + path := c.OutputPath + if path == "" { + path = fmt.Sprintf("operator-bundle-%d.zip", now().UTC().Unix()) + } + f, err := os.Create(path) + if err != nil { + return "", nil, fmt.Errorf("creating output file %s: %w", path, err) + } + return path, f, nil +} diff --git a/operator/cmd/rpk-k8s/k8s/multicluster/bundle_discover.go b/operator/cmd/rpk-k8s/k8s/multicluster/bundle_discover.go new file mode 100644 index 000000000..cfb8c5fbd --- /dev/null +++ b/operator/cmd/rpk-k8s/k8s/multicluster/bundle_discover.go @@ -0,0 +1,100 @@ +// Copyright 2026 Redpanda Data, Inc. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package multicluster + +import ( + "context" + "fmt" + + "github.com/redpanda-data/common-go/kube" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/redpanda-data/redpanda-operator/pkg/multicluster" +) + +// peerDiscovery is the result of looking up cache Secrets on a starting +// cluster and turning each one into a peer ClusterConnection. +type peerDiscovery struct { + // Connections are the peers built from labelled cache Secrets, in + // arbitrary order (apiserver list order). Excludes the starting cluster. + Connections []ClusterConnection + // Warnings are non-fatal issues encountered during discovery (e.g. a + // malformed cache Secret); the bundle proceeds with whatever peers were + // discoverable. Each entry is a complete sentence suitable for errors.txt. + Warnings []string +} + +// discoverPeers lists peer-kubeconfig cache Secrets in `namespace` of the +// starting cluster (selected by the `app.kubernetes.io/component= +// multicluster-kubeconfig-cache` label) and builds a ClusterConnection for +// each. The peer name is read from the MulticlusterPeerLabel rather than +// derived from the Secret name, so this works regardless of the operator's +// configured --kubeconfig-name prefix. +// +// Errors that prevent listing the Secrets at all are returned. Per-Secret +// failures (missing kubeconfig.yaml key, malformed kubeconfig, etc.) are +// recorded in peerDiscovery.Warnings so the bundle can still cover the +// peers that were discoverable. 
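+//
+// Illustrative shape of a cache Secret this matches (label keys shown
+// symbolically; the canonical constants live in pkg/multicluster):
+//
+//	metadata:
+//	  labels:
+//	    <KubeconfigCacheComponentLabel>: <KubeconfigCacheComponentValue>
+//	    <MulticlusterPeerLabel>: peer-a
+//	data:
+//	  kubeconfig.yaml: <kubeconfig bytes>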
+func discoverPeers(ctx context.Context, ctl *kube.Ctl, namespace string) (peerDiscovery, error) { + var list corev1.SecretList + if err := ctl.List(ctx, namespace, &list, client.MatchingLabels{ + multicluster.KubeconfigCacheComponentLabel: multicluster.KubeconfigCacheComponentValue, + }); err != nil { + return peerDiscovery{}, fmt.Errorf("listing kubeconfig cache Secrets in %q: %w", namespace, err) + } + + var out peerDiscovery + for _, s := range list.Items { + peerName := s.Labels[multicluster.MulticlusterPeerLabel] + if peerName == "" { + out.Warnings = append(out.Warnings, fmt.Sprintf( + "Secret %s/%s is missing the %s label, skipping", + s.Namespace, s.Name, multicluster.MulticlusterPeerLabel, + )) + continue + } + + raw, ok := s.Data["kubeconfig.yaml"] + if !ok || len(raw) == 0 { + out.Warnings = append(out.Warnings, fmt.Sprintf( + "Secret %s/%s is missing the kubeconfig.yaml data key, skipping peer %q", + s.Namespace, s.Name, peerName, + )) + continue + } + + rc, err := multicluster.LoadKubeconfigFromBytes(raw) + if err != nil { + out.Warnings = append(out.Warnings, fmt.Sprintf( + "parsing kubeconfig from Secret %s/%s for peer %q: %v", + s.Namespace, s.Name, peerName, err, + )) + continue + } + + peerCtl, err := kube.FromRESTConfig(rc) + if err != nil { + out.Warnings = append(out.Warnings, fmt.Sprintf( + "building client for peer %q from Secret %s/%s: %v", + peerName, s.Namespace, s.Name, err, + )) + continue + } + + out.Connections = append(out.Connections, ClusterConnection{ + Name: peerName, + Ctl: peerCtl, + SecretPrefix: peerName, + }) + } + + return out, nil +} diff --git a/operator/cmd/rpk-k8s/k8s/multicluster/bundle_logs.go b/operator/cmd/rpk-k8s/k8s/multicluster/bundle_logs.go new file mode 100644 index 000000000..c7c3838dc --- /dev/null +++ b/operator/cmd/rpk-k8s/k8s/multicluster/bundle_logs.go @@ -0,0 +1,165 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package multicluster + +import ( + "context" + "fmt" + "io" + "path" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/utils/ptr" + + "github.com/redpanda-data/redpanda-operator/operator/cmd/rpk-k8s/k8s/multicluster/checks" +) + +// LogsOptions controls per-container log retrieval. Zero values disable the +// corresponding cap. +type LogsOptions struct { + // LimitBytes caps the number of bytes returned per container. 0 means + // no cap (the apiserver still applies its own internal limit, which is + // typically generous). + LimitBytes int64 + // TailLines caps how many lines from the tail of the log are returned + // per container. 0 means no cap. + TailLines int64 +} + +// defaultLogsOptions returns the limits the bundle command applies when the +// caller doesn't override them. Values picked to bound bundle size at ~5 MiB +// per container while still being useful for incident review. +func defaultLogsOptions() LogsOptions { + return LogsOptions{LimitBytes: 5 * 1024 * 1024, TailLines: 5000} +} + +// logFetcher is the minimal abstraction over the Kubernetes log API needed +// by collectClusterLogs. Production uses a kubernetes.Interface-backed +// implementation; tests stub it because envtest's apiserver doesn't run +// kubelets and cannot return real container output. 
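+//
+// Conceptually each call is `kubectl logs -n <namespace> <pod> -c
+// <container> [--limit-bytes=N] [--tail=N] [-p]`, buffered into memory.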
+type logFetcher interface { + Logs(ctx context.Context, namespace, podName, container string, opts *corev1.PodLogOptions) ([]byte, error) +} + +// kubeLogFetcher fetches logs via a kubernetes.Interface built from a REST +// config. It buffers the entire response (capped by the caller's +// LimitBytes/TailLines) into memory before returning so the bundle writer +// only sees a complete blob. +type kubeLogFetcher struct { + cs kubernetes.Interface +} + +// newKubeLogFetcher returns a logFetcher backed by a kubernetes.Interface +// built from cfg. cfg is the same REST config the kube.Ctl was built from. +func newKubeLogFetcher(cfg *rest.Config) (*kubeLogFetcher, error) { + if cfg == nil { + return nil, fmt.Errorf("newKubeLogFetcher: nil rest.Config") + } + cs, err := kubernetes.NewForConfig(cfg) + if err != nil { + return nil, fmt.Errorf("building kubernetes clientset: %w", err) + } + return &kubeLogFetcher{cs: cs}, nil +} + +func (k *kubeLogFetcher) Logs(ctx context.Context, namespace, podName, container string, opts *corev1.PodLogOptions) ([]byte, error) { + // Defensive copy + container override so a caller-passed opts struct + // can be reused across containers without surprises. + cp := opts.DeepCopy() + cp.Container = container + stream, err := k.cs.CoreV1().Pods(namespace).GetLogs(podName, cp).Stream(ctx) + if err != nil { + return nil, err + } + defer stream.Close() //nolint:errcheck // best-effort close on stream + return io.ReadAll(stream) +} + +// collectClusterLogs writes per-container log files into the bundle for the +// pod recorded on cc.Pod. For every container — init and main — the function +// fetches the current log and, when the container has restarted, also the +// previous instance's log (Previous: true). +// +// Per-container fetches are independent: a failure on one container is +// recorded in the returned []error and the next container is attempted. A +// nil cc.Pod (PodCheck didn't find a pod) yields no work and no errors. +func collectClusterLogs(ctx context.Context, bw *bundleWriter, cc *checks.CheckContext, fetcher logFetcher, opts LogsOptions) []error { + if cc == nil || cc.Pod == nil || fetcher == nil { + return nil + } + + restarts := containerRestartCounts(cc.Pod) + + type entry struct { + name string + } + var containers []entry + // Init containers first, matching `kubectl logs --all-containers` order + // and making the bundle predictable when humans glance at it. + for _, c := range cc.Pod.Spec.InitContainers { + containers = append(containers, entry{name: c.Name}) + } + for _, c := range cc.Pod.Spec.Containers { + containers = append(containers, entry{name: c.Name}) + } + + var errs []error + for _, ce := range containers { + // Always fetch the current log; fetch previous only if the + // container has actually restarted, otherwise the apiserver + // returns "previous terminated container not found". 
+		fetches := []bool{false}
+		if restarts[ce.name] > 0 {
+			fetches = append(fetches, true)
+		}
+		for _, previous := range fetches {
+			getOpts := &corev1.PodLogOptions{Previous: previous}
+			if opts.LimitBytes > 0 {
+				getOpts.LimitBytes = ptr.To(opts.LimitBytes)
+			}
+			if opts.TailLines > 0 {
+				getOpts.TailLines = ptr.To(opts.TailLines)
+			}
+
+			data, err := fetcher.Logs(ctx, cc.Namespace, cc.Pod.Name, ce.name, getOpts)
+			if err != nil {
+				errs = append(errs, fmt.Errorf("logs for %s/%s container %s previous=%v: %w",
+					cc.Pod.Namespace, cc.Pod.Name, ce.name, previous, err))
+				continue
+			}
+
+			fname := ce.name + ".log"
+			if previous {
+				fname = ce.name + ".previous.log"
+			}
+			if werr := bw.writeBytes(path.Join("clusters", cc.Context, "logs", fname), data); werr != nil {
+				errs = append(errs, werr)
+			}
+		}
+	}
+	return errs
+}
+
+// containerRestartCounts returns a map from container name to its restart
+// count, including init containers. Used to decide whether to attempt a
+// `Previous: true` fetch (the apiserver returns an error if no previous
+// instance exists).
+func containerRestartCounts(p *corev1.Pod) map[string]int32 {
+	out := make(map[string]int32, len(p.Status.ContainerStatuses)+len(p.Status.InitContainerStatuses))
+	for _, cs := range p.Status.InitContainerStatuses {
+		out[cs.Name] = cs.RestartCount
+	}
+	for _, cs := range p.Status.ContainerStatuses {
+		out[cs.Name] = cs.RestartCount
+	}
+	return out
+}
diff --git a/operator/cmd/rpk-k8s/k8s/multicluster/bundle_logs_test.go b/operator/cmd/rpk-k8s/k8s/multicluster/bundle_logs_test.go
new file mode 100644
index 000000000..329b4c732
--- /dev/null
+++ b/operator/cmd/rpk-k8s/k8s/multicluster/bundle_logs_test.go
@@ -0,0 +1,285 @@
+// Copyright 2026 Redpanda Data, Inc.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.md
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0
+
+package multicluster
+
+import (
+	"archive/zip"
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"sort"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/redpanda-data/redpanda-operator/operator/cmd/rpk-k8s/k8s/multicluster/checks"
+)
+
+// fakeLogFetcher is a deterministic logFetcher used by Phase 2 unit tests.
+// It records the exact (podName, container, opts) tuples it was asked for
+// and serves canned responses keyed by container name. Every call appends a
+// trailer that distinguishes current from previous logs so tests can assert
+// the right `Previous` flag was passed.
+type fakeLogFetcher struct {
+	// responses maps container name -> static body returned for the
+	// current-log fetch (Previous=false). The previous-log body is the
+	// same body with a "[previous]" suffix.
+	responses map[string]string
+	// fail maps "container#previous=<bool>" -> error to return on that
+	// fetch. Empty means succeed.
+	fail map[string]error
+	// calls records every Logs invocation. Tests assert on this.
+ calls []logCall +} + +type logCall struct { + Namespace string + PodName string + Container string + Previous bool + Limit int64 // 0 if unset + Tail int64 // 0 if unset +} + +func (f *fakeLogFetcher) Logs(_ context.Context, namespace, podName, container string, opts *corev1.PodLogOptions) ([]byte, error) { + c := logCall{ + Namespace: namespace, + PodName: podName, + Container: container, + Previous: opts.Previous, + } + if opts.LimitBytes != nil { + c.Limit = *opts.LimitBytes + } + if opts.TailLines != nil { + c.Tail = *opts.TailLines + } + f.calls = append(f.calls, c) + + key := fmt.Sprintf("%s#previous=%v", container, opts.Previous) + if err, ok := f.fail[key]; ok { + return nil, err + } + + body, ok := f.responses[container] + if !ok { + body = "" + } + if opts.Previous { + body += "[previous]" + } + return []byte(body), nil +} + +func TestCollectClusterLogs_HappyPath(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "operator-0", Namespace: "redpanda"}, + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{{Name: "init"}}, + Containers: []corev1.Container{{Name: "manager"}, {Name: "sidecar"}}, + }, + Status: corev1.PodStatus{ + InitContainerStatuses: []corev1.ContainerStatus{ + {Name: "init", RestartCount: 0}, + }, + ContainerStatuses: []corev1.ContainerStatus{ + // manager has restarted once → "previous" log should be fetched. + {Name: "manager", RestartCount: 2}, + {Name: "sidecar", RestartCount: 0}, + }, + }, + } + cc := &checks.CheckContext{ + Context: "self", + Namespace: "redpanda", + Pod: pod, + } + fetcher := &fakeLogFetcher{ + responses: map[string]string{ + "init": "init log body", + "manager": "manager log body", + "sidecar": "sidecar log body", + }, + } + + var buf bytes.Buffer + bw := newBundleWriter(&buf) + errs := collectClusterLogs(context.Background(), bw, cc, fetcher, + LogsOptions{LimitBytes: 1024, TailLines: 100}) + require.Empty(t, errs, "happy path should produce no errors") + require.NoError(t, bw.Close()) + + // Expected file set: every container's current log, plus manager.previous + // because that's the only container with RestartCount > 0. + files := readZipFilesInternal(t, buf.Bytes()) + got := sortedKeys(files) + want := []string{ + "clusters/self/logs/init.log", + "clusters/self/logs/manager.log", + "clusters/self/logs/manager.previous.log", + "clusters/self/logs/sidecar.log", + } + assert.Equal(t, want, got) + + assert.Equal(t, "manager log body", string(files["clusters/self/logs/manager.log"])) + assert.Equal(t, "manager log body[previous]", string(files["clusters/self/logs/manager.previous.log"])) + + // Verify the LogsOptions caps were forwarded to the fetcher. + for _, c := range fetcher.calls { + assert.Equal(t, int64(1024), c.Limit, "LimitBytes must propagate") + assert.Equal(t, int64(100), c.Tail, "TailLines must propagate") + assert.Equal(t, "redpanda", c.Namespace) + assert.Equal(t, "operator-0", c.PodName) + } + // 4 calls expected: init, manager, manager(prev), sidecar. 
+	require.Len(t, fetcher.calls, 4)
+}
+
+func TestCollectClusterLogs_PerContainerErrorIsRecorded(t *testing.T) {
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{Name: "operator-0", Namespace: "redpanda"},
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{{Name: "manager"}, {Name: "sidecar"}},
+		},
+		Status: corev1.PodStatus{
+			ContainerStatuses: []corev1.ContainerStatus{
+				{Name: "manager", RestartCount: 0},
+				{Name: "sidecar", RestartCount: 0},
+			},
+		},
+	}
+	cc := &checks.CheckContext{Context: "self", Namespace: "redpanda", Pod: pod}
+	fetcher := &fakeLogFetcher{
+		responses: map[string]string{
+			"manager": "manager log body",
+			// sidecar deliberately omitted — fail path below.
+		},
+		fail: map[string]error{
+			"sidecar#previous=false": fmt.Errorf("simulated apiserver error"),
+		},
+	}
+
+	var buf bytes.Buffer
+	bw := newBundleWriter(&buf)
+	errs := collectClusterLogs(context.Background(), bw, cc, fetcher, LogsOptions{})
+	require.NoError(t, bw.Close())
+
+	require.Len(t, errs, 1, "exactly one container should fail")
+	assert.Contains(t, errs[0].Error(), "sidecar")
+	assert.Contains(t, errs[0].Error(), "simulated apiserver error")
+
+	// The successful container's log should still be in the zip — a single
+	// failing container can't lose the whole bundle.
+	files := readZipFilesInternal(t, buf.Bytes())
+	assert.Contains(t, files, "clusters/self/logs/manager.log")
+	assert.NotContains(t, files, "clusters/self/logs/sidecar.log")
+}
+
+func TestCollectClusterLogs_NoPodNoOp(t *testing.T) {
+	// PodCheck didn't find a pod (cc.Pod nil) — collectClusterLogs should
+	// be a no-op rather than panic. The bundle still completes; users see
+	// the upstream PodCheck failure in clusters/<ctx>/checks.json.
+	cc := &checks.CheckContext{Context: "self", Namespace: "redpanda"}
+	var buf bytes.Buffer
+	bw := newBundleWriter(&buf)
+	errs := collectClusterLogs(context.Background(), bw, cc, &fakeLogFetcher{}, LogsOptions{})
+	require.NoError(t, bw.Close())
+	assert.Empty(t, errs)
+}
+
+func TestResolveLogsOptions(t *testing.T) {
+	for _, tc := range []struct {
+		name        string
+		cfg         BundleConfig
+		wantBytes   int64
+		wantTail    int64
+		wantErrSubs string // non-empty asserts an error containing this substring
+	}{
+		{
+			name:      "empty inputs use defaults",
+			cfg:       BundleConfig{},
+			wantBytes: defaultLogsOptions().LimitBytes,
+			wantTail:  defaultLogsOptions().TailLines,
+		},
+		{
+			name:      "explicit human size and tail",
+			cfg:       BundleConfig{LogsSizeLimit: "10MB", LogsTailLines: 200},
+			wantBytes: 10_000_000,
+			wantTail:  200,
+		},
+		{
+			name:      "binary suffix accepted",
+			cfg:       BundleConfig{LogsSizeLimit: "10MiB", LogsTailLines: 200},
+			wantBytes: 10 * 1024 * 1024,
+			wantTail:  200,
+		},
+		{
+			name:      "0 disables size cap",
+			cfg:       BundleConfig{LogsSizeLimit: "0", LogsTailLines: 0},
+			wantBytes: 0,
+			wantTail:  defaultLogsOptions().TailLines,
+		},
+		{
+			name:      "negative tail clamped to 0",
+			cfg:       BundleConfig{LogsTailLines: -1},
+			wantBytes: defaultLogsOptions().LimitBytes,
+			wantTail:  0,
+		},
+		{
+			name:        "unparseable size is an error",
+			cfg:         BundleConfig{LogsSizeLimit: "definitely not a number"},
+			wantErrSubs: "logs-size-limit",
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			out, err := tc.cfg.resolveLogsOptions()
+			if tc.wantErrSubs != "" {
+				require.Error(t, err)
+				assert.Contains(t, err.Error(), tc.wantErrSubs)
+				return
+			}
+			require.NoError(t, err)
+			assert.Equal(t, tc.wantBytes, out.LimitBytes)
+			assert.Equal(t, tc.wantTail, out.TailLines)
+		})
+	}
+}
+
+// readZipFilesInternal mirrors readZipFiles in
bundle_test.go but lives in +// the internal package so internal-package tests don't depend on the +// external test file. Test helper. +func readZipFilesInternal(t *testing.T, b []byte) map[string][]byte { + t.Helper() + zr, err := zip.NewReader(bytes.NewReader(b), int64(len(b))) + require.NoError(t, err) + out := make(map[string][]byte, len(zr.File)) + for _, f := range zr.File { + rc, err := f.Open() + require.NoError(t, err) + data, err := io.ReadAll(rc) + _ = rc.Close() + require.NoError(t, err) + out[f.Name] = data + } + return out +} + +func sortedKeys[V any](m map[string]V) []string { + out := make([]string, 0, len(m)) + for k := range m { + out = append(out, k) + } + sort.Strings(out) + return out +} diff --git a/operator/cmd/rpk-k8s/k8s/multicluster/bundle_metrics.go b/operator/cmd/rpk-k8s/k8s/multicluster/bundle_metrics.go new file mode 100644 index 000000000..b14c3cc08 --- /dev/null +++ b/operator/cmd/rpk-k8s/k8s/multicluster/bundle_metrics.go @@ -0,0 +1,320 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package multicluster + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "net/http" + "path" + "strconv" + "strings" + "time" + + "github.com/redpanda-data/common-go/kube" + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/portforward" + "k8s.io/utils/ptr" + + "github.com/redpanda-data/redpanda-operator/operator/cmd/rpk-k8s/k8s/multicluster/checks" +) + +// MetricsOptions controls how often /metrics is sampled per cluster. Two +// samples (the default) at a 10s interval matches `rpk debug bundle`'s +// behaviour and lets investigators compute counter rates post-hoc without a +// live Prometheus. +type MetricsOptions struct { + Samples int + Interval time.Duration +} + +const ( + // defaultMetricsSamples is the default number of /metrics scrape + // samples per cluster. Mirrors rpk debug bundle's default of 2 — the + // minimum that lets you compute counter rate-of-change. + defaultMetricsSamples = 2 + // defaultMetricsInterval is the default wall-clock interval between + // successive /metrics samples. Mirrors rpk debug bundle's default. + defaultMetricsInterval = 10 * time.Second +) + +// metricsFetcher abstracts a GET against the operator's /metrics endpoint. +// The production implementation port-forwards into the pod and authenticates +// with a Bearer token minted via the TokenRequest API; tests stub it because +// envtest doesn't run kubelets and there is no real metrics server to scrape. +type metricsFetcher interface { + // Metrics returns the raw response body of GET /metrics on the named + // pod's container port. scheme is "http" or "https" depending on + // whether the metrics server is TLS-terminated. + Metrics(ctx context.Context, namespace, podName, scheme string, port int) ([]byte, error) +} + +// kubeMetricsFetcher implements metricsFetcher by port-forwarding to the +// operator pod's metrics port and scraping /metrics with a Bearer token +// minted via the TokenRequest API for the pod's ServiceAccount. 
+//
+// Why not the apiserver pod-proxy: the operator wires its metrics server up
+// with controller-runtime's filters.WithAuthenticationAndAuthorization,
+// which expects either a Bearer token or a client cert that the metrics
+// server's authenticator can validate. The apiserver pod-proxy opens a new
+// HTTP connection to the pod and does NOT propagate the user's auth
+// headers — the metrics server sees an unauthenticated request and returns
+// 401, surfaced to client-go as "the server has asked for the client to
+// provide credentials". Port-forward sidesteps that: we present the SA's
+// minted token directly to the metrics filter, which TokenReview's it
+// against the apiserver and forwards the identity into the
+// SubjectAccessReview for /metrics.
+//
+// The scrape will still 403 if the SA doesn't have nonResourceURLs:
+// /metrics granted. The chart now binds the operator's ServiceAccount to
+// its metrics-reader ClusterRole, so stock installs should pass; bespoke
+// RBAC setups may still need an equivalent grant. A 403 is recorded in
+// errors.txt and the rest of the bundle still completes.
+type kubeMetricsFetcher struct {
+	ctl *kube.Ctl
+	cs  kubernetes.Interface
+}
+
+func newKubeMetricsFetcher(ctl *kube.Ctl) (*kubeMetricsFetcher, error) {
+	if ctl == nil {
+		return nil, fmt.Errorf("newKubeMetricsFetcher: nil kube.Ctl")
+	}
+	cs, err := kubernetes.NewForConfig(ctl.RestConfig())
+	if err != nil {
+		return nil, fmt.Errorf("building kubernetes clientset: %w", err)
+	}
+	return &kubeMetricsFetcher{ctl: ctl, cs: cs}, nil
+}
+
+// Metrics opens a port-forward to the metrics port on `podName`, mints a
+// Bearer token for the pod's ServiceAccount via the TokenRequest API, and
+// GETs /metrics over the forwarded port with that token.
+func (k *kubeMetricsFetcher) Metrics(ctx context.Context, namespace, podName, scheme string, port int) ([]byte, error) {
+	pod, err := k.cs.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("getting pod %s/%s: %w", namespace, podName, err)
+	}
+
+	forwarded, stop, err := k.ctl.PortForward(ctx, pod, io.Discard, io.Discard)
+	if err != nil {
+		return nil, fmt.Errorf("port-forwarding to %s/%s: %w", namespace, podName, err)
+	}
+	defer stop()
+
+	localPort, ok := pickForwardedPort(forwarded, uint16(port))
+	if !ok {
+		return nil, fmt.Errorf("metrics port %d not declared as a containerPort on pod %s/%s — port-forward returned ports %v",
+			port, namespace, podName, forwardedSummary(forwarded))
+	}
+
+	saName := pod.Spec.ServiceAccountName
+	if saName == "" {
+		saName = "default"
+	}
+	tokenResp, err := k.cs.CoreV1().ServiceAccounts(namespace).CreateToken(ctx, saName,
+		&authenticationv1.TokenRequest{
+			Spec: authenticationv1.TokenRequestSpec{
+				ExpirationSeconds: ptr.To[int64](600),
+			},
+		},
+		metav1.CreateOptions{},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("requesting Bearer token for ServiceAccount %s/%s: %w", namespace, saName, err)
+	}
+
+	return scrapeMetrics(ctx, scheme, localPort, tokenResp.Status.Token)
+}
+
+// pickForwardedPort returns the local port that PortForward mapped to the
+// given remote port, or false if the remote port wasn't forwarded.
+func pickForwardedPort(forwarded []portforward.ForwardedPort, remote uint16) (uint16, bool) {
+	for _, fp := range forwarded {
+		if fp.Remote == remote {
+			return fp.Local, true
+		}
+	}
+	return 0, false
+}
+
+// forwardedSummary renders a port-forward result as a compact debug summary
+// for use in error messages.
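+// For a single forwarded port the result looks like ["54321->8443"]
+// (local->remote; the local port shown here is illustrative).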
+func forwardedSummary(forwarded []portforward.ForwardedPort) []string {
+	out := make([]string, 0, len(forwarded))
+	for _, fp := range forwarded {
+		out = append(out, fmt.Sprintf("%d->%d", fp.Local, fp.Remote))
+	}
+	return out
+}
+
+// scrapeMetrics performs the HTTP(S) GET /metrics with a Bearer token. The
+// scheme is "http" or "https" — for HTTPS we accept any server cert
+// because the metrics server typically uses self-signed certs and we're
+// connecting via 127.0.0.1 anyway.
+func scrapeMetrics(ctx context.Context, scheme string, localPort uint16, token string) ([]byte, error) {
+	url := fmt.Sprintf("%s://127.0.0.1:%d/metrics", scheme, localPort)
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+	if err != nil {
+		return nil, fmt.Errorf("building metrics request: %w", err)
+	}
+	req.Header.Set("Authorization", "Bearer "+token)
+
+	client := &http.Client{
+		Timeout: 30 * time.Second,
+		Transport: &http.Transport{
+			// Self-signed by default — and we're hitting 127.0.0.1
+			// over a port-forward, so MITM risk is moot.
+			TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec
+		},
+	}
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("scraping %s: %w", url, err)
+	}
+	defer resp.Body.Close() //nolint:errcheck
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("reading %s body: %w", url, err)
+	}
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("scraping %s: HTTP %d %s — body: %s",
+			url, resp.StatusCode, resp.Status, truncate(body, 512))
+	}
+	return body, nil
+}
+
+// truncate trims a byte slice to n bytes for use in error messages.
+func truncate(b []byte, n int) string {
+	if len(b) <= n {
+		return string(b)
+	}
+	return string(b[:n]) + "...(truncated)"
+}
+
+// collectClusterMetrics scrapes the operator's /metrics endpoint
+// opts.Samples times at opts.Interval and writes each Prometheus exposition
+// into the bundle at clusters/<ctx>/metrics/t<i>_metrics.txt. Multiple
+// samples let investigators compute counter rate-of-change post-hoc without
+// needing a live Prometheus — matches `rpk debug bundle`'s behaviour.
+// The actual transport is an implementation detail of the metricsFetcher;
+// production goes through port-forward + Bearer-token auth (see
+// kubeMetricsFetcher).
+//
+// Returns nil (no work, no error) when:
+//
+//   - cc.Pod is nil (PodCheck didn't find a pod)
+//   - the operator deployment doesn't expose --metrics-bind-address
+//     (metrics server disabled)
+//   - the deploy args couldn't be parsed for a port
+//
+// Per-sample scrape failures are recorded in the returned []error and the
+// next sample is still attempted. Context cancellation between samples
+// returns immediately with whatever has been collected so far.
+//
+// progress, when non-nil, is called once per sample with a stderr-style
+// "[<ctx>] sample i/N" message. Run wires this to the --verbose
+// progress logger.
+func collectClusterMetrics(
+	ctx context.Context,
+	bw *bundleWriter,
+	cc *checks.CheckContext,
+	fetcher metricsFetcher,
+	opts MetricsOptions,
+	progress func(format string, args ...any),
+) []error {
+	if cc == nil || cc.Pod == nil || fetcher == nil {
+		return nil
+	}
+
+	port, ok := parseMetricsPort(cc.DeployArgs)
+	if !ok {
+		// The operator was deployed without --metrics-bind-address, so
+		// there's no metrics server to scrape. Not an error — skip.
+		return nil
+	}
+	scheme := metricsScheme(cc.DeployArgs)
+
+	if opts.Samples < 1 {
+		opts.Samples = 1
+	}
+
+	var errs []error
+	for i := 0; i < opts.Samples; i++ {
+		if i > 0 {
+			// Wait between samples but respect cancellation so a
+			// long-running bundle can be aborted cleanly.
+			timer := time.NewTimer(opts.Interval)
+			select {
+			case <-ctx.Done():
+				timer.Stop()
+				return errs
+			case <-timer.C:
+			}
+		}
+		if progress != nil {
+			progress("[%s] /metrics sample %d/%d", cc.Context, i+1, opts.Samples)
+		}
+		data, err := fetcher.Metrics(ctx, cc.Namespace, cc.Pod.Name, scheme, port)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("scraping metrics from %s/%s sample %d/%d (%s://:%d): %w",
+				cc.Pod.Namespace, cc.Pod.Name, i+1, opts.Samples, scheme, port, err))
+			continue
+		}
+		entry := path.Join("clusters", cc.Context, "metrics", fmt.Sprintf("t%d_metrics.txt", i))
+		if werr := bw.writeBytes(entry, data); werr != nil {
+			errs = append(errs, werr)
+		}
+	}
+	return errs
+}
+
+// parseMetricsPort returns the port the operator's metrics server listens
+// on, parsed from --metrics-bind-address in the deployment's container
+// args. The flag takes a "[host]:port" value; we ignore the host (which is
+// typically empty meaning "listen on all interfaces").
+//
+// Returns ok=false when:
+//
+//   - the flag isn't present (metrics disabled)
+//   - the flag value is empty (metrics disabled — controller-runtime
+//     treats "" as disabled)
+//   - the flag value doesn't parse as a valid port number
+func parseMetricsPort(args []string) (int, bool) {
+	const flag = "--metrics-bind-address"
+	value := checks.ExtractFlag(args, flag)
+	if value == "" {
+		return 0, false
+	}
+	// Strip the leading host (default ":8443" → "8443"). net.SplitHostPort
+	// is overkill here and rejects bare ports, so do the simple thing.
+	if i := strings.LastIndex(value, ":"); i >= 0 {
+		value = value[i+1:]
+	}
+	port, err := strconv.Atoi(value)
+	if err != nil || port <= 0 || port > 65535 {
+		return 0, false
+	}
+	return port, true
+}
+
+// metricsScheme returns "https" when the deployment was started with
+// --metrics-cert-path / --metrics-key-path (controller-runtime turns on
+// SecureServing in that case), and "http" otherwise. Used by
+// collectClusterMetrics to pick the scheme handed to the metricsFetcher.
+func metricsScheme(args []string) string {
+	if checks.ExtractFlag(args, "--metrics-cert-path") != "" {
+		return "https"
+	}
+	return "http"
+}
diff --git a/operator/cmd/rpk-k8s/k8s/multicluster/bundle_metrics_test.go b/operator/cmd/rpk-k8s/k8s/multicluster/bundle_metrics_test.go
new file mode 100644
index 000000000..5d11c5d82
--- /dev/null
+++ b/operator/cmd/rpk-k8s/k8s/multicluster/bundle_metrics_test.go
@@ -0,0 +1,266 @@
+// Copyright 2026 Redpanda Data, Inc.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.md
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0
+
+package multicluster
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/redpanda-data/redpanda-operator/operator/cmd/rpk-k8s/k8s/multicluster/checks"
+)
+
+// singleSampleOpts is the simplest opts that satisfies collectClusterMetrics
+// in the tests that only care about a single scrape.
Two samples is the +// command-line default; tests that exercise multi-sample behaviour use a +// short interval to keep wall-clock time low. +var singleSampleOpts = MetricsOptions{Samples: 1, Interval: time.Millisecond} + +// fakeMetricsFetcher records its scrape arguments and returns a canned +// response. Tests assert on the recorded calls and on the bytes that ended +// up in the bundle zip. +type fakeMetricsFetcher struct { + body []byte + err error + calls []metricsCall +} + +type metricsCall struct { + Namespace string + PodName string + Scheme string + Port int +} + +func (f *fakeMetricsFetcher) Metrics(_ context.Context, namespace, podName, scheme string, port int) ([]byte, error) { + f.calls = append(f.calls, metricsCall{Namespace: namespace, PodName: podName, Scheme: scheme, Port: port}) + if f.err != nil { + return nil, f.err + } + return f.body, nil +} + +// fetcherWithCounter returns a different body (or error) on each call — +// used by the multi-sample tests to verify per-sample distinct content +// lands in distinct files. +type fetcherWithCounter struct { + cb func() ([]byte, error) +} + +func (f *fetcherWithCounter) Metrics(_ context.Context, _, _, _ string, _ int) ([]byte, error) { + return f.cb() +} + +func TestCollectClusterMetrics_HappyPath(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "operator-0", Namespace: "redpanda"}, + } + cc := &checks.CheckContext{ + Context: "self", + Namespace: "redpanda", + Pod: pod, + DeployArgs: []string{"--metrics-bind-address=:8443"}, + } + + body := []byte("# HELP example_total help text\nexample_total 1\n") + fetcher := &fakeMetricsFetcher{body: body} + + var buf bytes.Buffer + bw := newBundleWriter(&buf) + errs := collectClusterMetrics(context.Background(), bw, cc, fetcher, singleSampleOpts, nil) + require.NoError(t, bw.Close()) + require.Empty(t, errs) + + // One scrape, against the right pod and port. + require.Len(t, fetcher.calls, 1) + assert.Equal(t, metricsCall{Namespace: "redpanda", PodName: "operator-0", Scheme: "http", Port: 8443}, fetcher.calls[0]) + + // t0_metrics.txt must contain the canned body verbatim. + files := readZipFilesInternal(t, buf.Bytes()) + require.Contains(t, files, "clusters/self/metrics/t0_metrics.txt") + assert.Equal(t, string(body), string(files["clusters/self/metrics/t0_metrics.txt"])) +} + +func TestCollectClusterMetrics_MultipleSamples(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "operator-0", Namespace: "redpanda"}, + } + cc := &checks.CheckContext{ + Context: "self", + Namespace: "redpanda", + Pod: pod, + DeployArgs: []string{"--metrics-bind-address=:8443"}, + } + + // Each call returns a different body so we can verify per-sample + // content lands in its own file. 
+ var calls int + fetcher := &fetcherWithCounter{cb: func() ([]byte, error) { + calls++ + return []byte(fmt.Sprintf("sample %d\n", calls)), nil + }} + + var buf bytes.Buffer + bw := newBundleWriter(&buf) + opts := MetricsOptions{Samples: 3, Interval: time.Millisecond} + errs := collectClusterMetrics(context.Background(), bw, cc, fetcher, opts, nil) + require.NoError(t, bw.Close()) + require.Empty(t, errs) + assert.Equal(t, 3, calls, "fetcher must be called once per sample") + + files := readZipFilesInternal(t, buf.Bytes()) + require.Contains(t, files, "clusters/self/metrics/t0_metrics.txt") + require.Contains(t, files, "clusters/self/metrics/t1_metrics.txt") + require.Contains(t, files, "clusters/self/metrics/t2_metrics.txt") + assert.Equal(t, "sample 1\n", string(files["clusters/self/metrics/t0_metrics.txt"])) + assert.Equal(t, "sample 2\n", string(files["clusters/self/metrics/t1_metrics.txt"])) + assert.Equal(t, "sample 3\n", string(files["clusters/self/metrics/t2_metrics.txt"])) +} + +func TestCollectClusterMetrics_PartialFailureContinues(t *testing.T) { + // Sample 2 fails but sample 1 and 3 succeed. The error must be + // recorded and the surviving samples must be in the bundle. + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "operator-0", Namespace: "redpanda"}, + } + cc := &checks.CheckContext{ + Context: "self", + Namespace: "redpanda", + Pod: pod, + DeployArgs: []string{"--metrics-bind-address=:8443"}, + } + + var calls int + fetcher := &fetcherWithCounter{cb: func() ([]byte, error) { + calls++ + if calls == 2 { + return nil, fmt.Errorf("transient: connection reset") + } + return []byte(fmt.Sprintf("sample %d\n", calls)), nil + }} + + var buf bytes.Buffer + bw := newBundleWriter(&buf) + opts := MetricsOptions{Samples: 3, Interval: time.Millisecond} + errs := collectClusterMetrics(context.Background(), bw, cc, fetcher, opts, nil) + require.NoError(t, bw.Close()) + require.Len(t, errs, 1) + assert.Contains(t, errs[0].Error(), "sample 2/3") + assert.Contains(t, errs[0].Error(), "transient: connection reset") + + files := readZipFilesInternal(t, buf.Bytes()) + require.Contains(t, files, "clusters/self/metrics/t0_metrics.txt") + assert.NotContains(t, files, "clusters/self/metrics/t1_metrics.txt", "failed sample must not produce a file") + require.Contains(t, files, "clusters/self/metrics/t2_metrics.txt") +} + +func TestCollectClusterMetrics_HTTPSWhenCertPathSet(t *testing.T) { + cc := &checks.CheckContext{ + Context: "self", + Namespace: "redpanda", + Pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "operator-0", Namespace: "redpanda"}}, + DeployArgs: []string{ + "--metrics-bind-address=:8443", + "--metrics-cert-path=/tls/tls.crt", + "--metrics-key-path=/tls/tls.key", + }, + } + fetcher := &fakeMetricsFetcher{body: []byte("ok")} + + var buf bytes.Buffer + bw := newBundleWriter(&buf) + errs := collectClusterMetrics(context.Background(), bw, cc, fetcher, singleSampleOpts, nil) + require.NoError(t, bw.Close()) + require.Empty(t, errs) + require.Len(t, fetcher.calls, 1) + assert.Equal(t, "https", fetcher.calls[0].Scheme, + "--metrics-cert-path should select the https proxy scheme") +} + +func TestCollectClusterMetrics_MissingFlagSkipsCleanly(t *testing.T) { + // Operator deployed without --metrics-bind-address — metrics server + // disabled; collectClusterMetrics must be a no-op (no error, no + // fetch, no bundle entry). 
+	cc := &checks.CheckContext{
+		Context:    "self",
+		Namespace:  "redpanda",
+		Pod:        &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "operator-0", Namespace: "redpanda"}},
+		DeployArgs: []string{"--leader-elect"},
+	}
+	fetcher := &fakeMetricsFetcher{body: []byte("should-not-be-fetched")}
+
+	var buf bytes.Buffer
+	bw := newBundleWriter(&buf)
+	errs := collectClusterMetrics(context.Background(), bw, cc, fetcher, singleSampleOpts, nil)
+	require.NoError(t, bw.Close())
+	assert.Empty(t, errs)
+	assert.Empty(t, fetcher.calls, "fetcher must not be called when --metrics-bind-address is unset")
+}
+
+func TestCollectClusterMetrics_ErrorIsRecorded(t *testing.T) {
+	cc := &checks.CheckContext{
+		Context:    "self",
+		Namespace:  "redpanda",
+		Pod:        &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "operator-0", Namespace: "redpanda"}},
+		DeployArgs: []string{"--metrics-bind-address=:8443"},
+	}
+	fetcher := &fakeMetricsFetcher{err: fmt.Errorf("simulated 403 forbidden")}
+
+	var buf bytes.Buffer
+	bw := newBundleWriter(&buf)
+	errs := collectClusterMetrics(context.Background(), bw, cc, fetcher, singleSampleOpts, nil)
+	require.NoError(t, bw.Close())
+	require.Len(t, errs, 1)
+	assert.Contains(t, errs[0].Error(), "simulated 403 forbidden")
+	assert.Contains(t, errs[0].Error(), "operator-0")
+
+	// No t<i>_metrics.txt entry should exist when the only sample failed.
+	files := readZipFilesInternal(t, buf.Bytes())
+	for fname := range files {
+		assert.NotContains(t, fname, "/metrics/t")
+	}
+}
+
+func TestParseMetricsPort(t *testing.T) {
+	for _, tc := range []struct {
+		name     string
+		args     []string
+		wantPort int
+		wantOK   bool
+	}{
+		{name: "default :8443", args: []string{"--metrics-bind-address=:8443"}, wantPort: 8443, wantOK: true},
+		{name: "host:port", args: []string{"--metrics-bind-address=0.0.0.0:9090"}, wantPort: 9090, wantOK: true},
+		{name: "missing flag", args: []string{"--leader-elect"}, wantPort: 0, wantOK: false},
+		{name: "empty value disables", args: []string{"--metrics-bind-address="}, wantPort: 0, wantOK: false},
+		{name: "garbage value", args: []string{"--metrics-bind-address=:abc"}, wantPort: 0, wantOK: false},
+		{name: "out of range", args: []string{"--metrics-bind-address=:99999"}, wantPort: 0, wantOK: false},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			port, ok := parseMetricsPort(tc.args)
+			assert.Equal(t, tc.wantOK, ok)
+			assert.Equal(t, tc.wantPort, port)
+		})
+	}
+}
+
+func TestMetricsScheme(t *testing.T) {
+	assert.Equal(t, "http", metricsScheme([]string{"--metrics-bind-address=:8443"}))
+	assert.Equal(t, "https", metricsScheme([]string{
+		"--metrics-bind-address=:8443",
+		"--metrics-cert-path=/tls/tls.crt",
+	}))
+}
diff --git a/operator/cmd/rpk-k8s/k8s/multicluster/bundle_test.go b/operator/cmd/rpk-k8s/k8s/multicluster/bundle_test.go
new file mode 100644
index 000000000..611f18981
--- /dev/null
+++ b/operator/cmd/rpk-k8s/k8s/multicluster/bundle_test.go
@@ -0,0 +1,382 @@
+// Copyright 2026 Redpanda Data, Inc.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.md
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0
+
+package multicluster_test
+
+import (
+	"archive/zip"
+	"bytes"
+	"context"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/json"
+	"io"
+	"math/big"
+	"sort"
+	"testing"
+	"time"
+
+	"github.com/redpanda-data/common-go/kube"
+	"github.com/redpanda-data/common-go/kube/kubetest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+
+	"github.com/redpanda-data/redpanda-operator/operator/cmd/rpk-k8s/k8s/multicluster"
+	"github.com/redpanda-data/redpanda-operator/operator/cmd/rpk-k8s/k8s/multicluster/checks"
+	mcpkg "github.com/redpanda-data/redpanda-operator/pkg/multicluster"
+	"github.com/redpanda-data/redpanda-operator/pkg/testutil"
+)
+
+// fastTestChecks is the subset of cluster checks used by bundle tests. It
+// excludes TLSCheck (uses WaitFor which polls until the secret exists or
+// the context is cancelled — hangs against a vanilla envtest), RaftCheck
+// (port-forwards to the operator pod), TLSSANCheck and DeploymentRaftCheck
+// (depend on TLS state TLSCheck would have populated). PodCheck and
+// DeploymentCheck are still exercised so the bundle's per-cluster artifact
+// serialisation path is meaningful when those resources do exist.
+var fastTestChecks = []checks.ClusterCheck{
+	&checks.PodCheck{},
+	&checks.DeploymentCheck{},
+}
+
+// TestIntegrationBundleRun_RoundTrip exercises the full Run pipeline using
+// pre-populated Connections (no discovery) against a single envtest. It
+// verifies that a successful run produces the expected file layout, that
+// manifest.json carries the right fields, and that errors.txt is omitted
+// when no errors accumulated.
+func TestIntegrationBundleRun_RoundTrip(t *testing.T) {
+	testutil.SkipIfNotIntegration(t)
+
+	ctx := testutil.Context(t)
+	ctl := kubetest.NewEnv(t)
+
+	// Frozen clock so manifest.json and the default output filename are
+	// reproducible across runs.
+	frozen := time.Date(2026, 1, 2, 3, 4, 5, 0, time.UTC)
+	cfg := &multicluster.BundleConfig{
+		Connection: multicluster.ConnectionConfig{
+			Namespace:   "default",
+			ServiceName: "operator",
+			Connections: []multicluster.ClusterConnection{
+				{Name: "self", Ctl: ctl, SecretPrefix: "self"},
+			},
+		},
+		ClusterChecks: fastTestChecks,
+		Now:           func() time.Time { return frozen },
+	}
+
+	var buf bytes.Buffer
+	res, err := cfg.Run(ctx, &buf)
+	require.NoError(t, err)
+	require.NotNil(t, res)
+	require.Len(t, res.Contexts, 1, "without discovery, the roster is just the starting cluster")
+
+	files := readZipFiles(t, buf.Bytes())
+
+	// Expected files at the root.
+	require.Contains(t, files, "manifest.json")
+	require.Contains(t, files, "status.txt")
+	require.Contains(t, files, "clusters/self/checks.json")
+	require.Contains(t, files, "cross-cluster/checks.json")
+
+	// errors.txt should NOT be present when no errors accumulated. The check
+	// pipeline against an empty envtest will produce *failing* check Results
+	// (no operator pod found, etc.) — those are recorded in checks.json, not
+	// errors.txt. errors.txt is only for collection-side failures.
+	assert.NotContains(t, files, "errors.txt")
+
+	// manifest.json should round-trip with the expected fields.
+	var m struct {
+		SchemaVersion          int       `json:"schemaVersion"`
+		GeneratedAt            time.Time `json:"generatedAt"`
+		Namespace              string    `json:"namespace"`
+		ServiceName            string    `json:"serviceName"`
+		IncludePrivateKeys     bool      `json:"includePrivateKeys"`
+		Clusters               []string  `json:"clusters"`
+		LogsCollected          bool      `json:"logsCollected"`
+		LogsLimitBytes         int64     `json:"logsLimitBytes"`
+		LogsTailLines          int64     `json:"logsTailLines"`
+		MetricsCollected       bool      `json:"metricsCollected"`
+		MetricsSamples         int       `json:"metricsSamples"`
+		MetricsIntervalSeconds float64   `json:"metricsIntervalSeconds"`
+	}
+	require.NoError(t, json.Unmarshal(files["manifest.json"], &m))
+	assert.Equal(t, 1, m.SchemaVersion)
+	assert.True(t, m.GeneratedAt.Equal(frozen), "generatedAt should reflect the injected clock")
+	assert.Equal(t, "default", m.Namespace)
+	assert.Equal(t, "operator", m.ServiceName)
+	assert.False(t, m.IncludePrivateKeys)
+	assert.Equal(t, []string{"self"}, m.Clusters)
+	// Phase 2: with default flags, logs collection is enabled and the
+	// default caps are recorded. The actual collection is a no-op here
+	// because PodCheck didn't find an operator pod (cc.Pod is nil), so
+	// no logs/ entries appear in the zip — but the manifest still
+	// records the policy that *would* have been applied.
+	assert.True(t, m.LogsCollected)
+	assert.Equal(t, int64(5*1024*1024), m.LogsLimitBytes)
+	assert.Equal(t, int64(5000), m.LogsTailLines)
+	for fname := range files {
+		assert.NotContains(t, fname, "/logs/", "no log files when no pod was found")
+	}
+	// Phase 3: with default flags, metrics collection is enabled. As
+	// with logs, no actual scrape happens here because there is no pod
+	// — but the manifest records the policy (samples + interval) so a
+	// bundle reader can interpret t<i>_metrics.txt filenames.
+	assert.True(t, m.MetricsCollected)
+	assert.Equal(t, 2, m.MetricsSamples)
+	assert.Equal(t, float64(10), m.MetricsIntervalSeconds)
+	for fname := range files {
+		assert.NotContains(t, fname, "/metrics/", "no metrics files when no pod was found")
+	}
+
+	// checks.json under clusters/<ctx>/ should be a JSON array (each entry
+	// is a checks.Result). We don't assert on the exact contents because
+	// they reflect the live check implementations, but the array shape and
+	// presence of a check name is enough to confirm the writer round-trips.
+	var clusterChecks []struct {
+		Name    string `json:"Name"`
+		OK      bool   `json:"OK"`
+		Message string `json:"Message"`
+	}
+	require.NoError(t, json.Unmarshal(files["clusters/self/checks.json"], &clusterChecks))
+	require.NotEmpty(t, clusterChecks, "checks.json should contain at least one check Result")
+	for _, c := range clusterChecks {
+		assert.NotEmpty(t, c.Name, "each Result should carry a Name")
+	}
+}
+
+// TestIntegrationBundleDiscoverPeers exercises peer discovery against a real
+// apiserver. It pre-populates two labelled cache Secrets on a starting
+// envtest cluster — one well-formed pointing at a real second envtest, one
+// malformed — and verifies that discoverPeers returns the well-formed peer
+// as a working Connection and records the malformed one as a Warning rather
+// than failing.
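+//
+// Topology, sketched:
+//
+//	starting envtest (holds the cache Secrets)
+//	  -> peer-a: well-formed Secret, real envtest; joins the roster
+//	  -> peer-b: malformed Secret; recorded as a Warning only
+//	  -> unrelated: unlabelled Secret; ignored entirely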
+func TestIntegrationBundleDiscoverPeers(t *testing.T) {
+	testutil.SkipIfNotIntegration(t)
+
+	ctx := testutil.Context(t)
+
+	starting := kubetest.NewEnv(t)
+	peer := kubetest.NewEnv(t)
+
+	const ns = "default"
+
+	// Well-formed cache Secret pointing at the peer envtest.
+	wellFormed := buildCacheSecret(t, "test-kubeconfig-peer-a", ns, "peer-a", peerKubeconfig(t, "peer-a", peer))
+	require.NoError(t, starting.Create(ctx, wellFormed))
+
+	// Malformed cache Secret: correctly labelled as a cache but missing the
+	// peer label, and carrying an unparseable payload, so discovery should
+	// record a Warning and skip it.
+	malformed := buildCacheSecret(t, "test-kubeconfig-peer-b", ns, "", []byte("not a kubeconfig"))
+	delete(malformed.Labels, mcpkg.MulticlusterPeerLabel)
+	require.NoError(t, starting.Create(ctx, malformed))
+
+	// And a Secret in the namespace that's NOT a cache (no labels) — it must
+	// be ignored entirely by the label selector, not even appearing in
+	// Warnings.
+	require.NoError(t, starting.Create(ctx, &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{Name: "unrelated", Namespace: ns},
+		Data:       map[string][]byte{"some-key": []byte("some-value")},
+	}))
+
+	cfg := &multicluster.BundleConfig{
+		Connection: multicluster.ConnectionConfig{
+			Namespace:   ns,
+			ServiceName: "operator",
+			Connections: []multicluster.ClusterConnection{
+				{Name: "self", Ctl: starting, SecretPrefix: "self"},
+			},
+		},
+		ClusterChecks: fastTestChecks,
+		Now:           func() time.Time { return time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) },
+	}
+
+	var buf bytes.Buffer
+	res, err := cfg.Run(ctx, &buf)
+	require.NoError(t, err)
+	require.NotNil(t, res)
+
+	// Roster: starting + peer-a. The malformed Secret must NOT have produced
+	// a peer entry, but it MUST have produced an errors.txt warning.
+	got := make([]string, 0, len(res.Contexts))
+	for _, cc := range res.Contexts {
+		got = append(got, cc.Context)
+	}
+	sort.Strings(got)
+	require.Equal(t, []string{"peer-a", "self"}, got)
+
+	// Errors must mention the malformed Secret by name (so whoever reads the
+	// bundle can chase down which one is broken).
+	require.NotEmpty(t, res.Errors, "malformed cache Secret should produce a non-fatal Warning")
+	var sawMalformed bool
+	for _, e := range res.Errors {
+		if bytes.Contains([]byte(e), []byte("test-kubeconfig-peer-b")) {
+			sawMalformed = true
+			break
+		}
+	}
+	assert.True(t, sawMalformed, "errors should reference the malformed Secret by name; got: %q", res.Errors)
+
+	// errors.txt must be present in the zip.
+	files := readZipFiles(t, buf.Bytes())
+	require.Contains(t, files, "errors.txt")
+
+	// Both the starting cluster and the discovered peer should each have
+	// their own clusters/<context>/checks.json.
+	require.Contains(t, files, "clusters/self/checks.json")
+	require.Contains(t, files, "clusters/peer-a/checks.json")
+}
+
+// TestRedactSecret locks down the default redaction behaviour. It is
+// deliberately a pure unit test (no envtest) so a regression in the
+// redaction defaults fails fast in CI.
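+//
+// For orientation, a redacted Secret serialised into the bundle looks
+// roughly like this (illustrative; Secret data is base64 in YAML, and
+// "REDACTED" encodes to UkVEQUNURUQ=):
+//
+//	data:
+//	  ca.crt: <base64 of the original bytes>
+//	  tls.crt: <base64 of the original bytes>
+//	  tls.key: UkVEQUNURUQ=
+//	  kubeconfig.yaml: UkVEQUNURUQ=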
+func TestRedactSecret(t *testing.T) { + in := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example", + Namespace: "default", + ManagedFields: []metav1.ManagedFieldsEntry{ + {Manager: "noisy", Operation: metav1.ManagedFieldsOperationUpdate}, + }, + }, + Data: map[string][]byte{ + "tls.crt": []byte("PUBLIC-CRT-DATA"), + "tls.key": []byte("PRIVATE-KEY-DATA"), + "ca.crt": []byte("CA-DATA"), + "kubeconfig.yaml": []byte("PEER-CREDS"), + "opaque": []byte("opaque-value"), + }, + } + + t.Run("default redacts private material and managed fields", func(t *testing.T) { + out := multicluster.ExportRedactSecretForTest(in, false) + assert.Empty(t, out.ManagedFields, "managedFields should be stripped unconditionally") + assert.Equal(t, "REDACTED", string(out.Data["tls.key"])) + assert.Equal(t, "REDACTED", string(out.Data["kubeconfig.yaml"])) + // Public material survives. + assert.Equal(t, "PUBLIC-CRT-DATA", string(out.Data["tls.crt"])) + assert.Equal(t, "CA-DATA", string(out.Data["ca.crt"])) + // Unrelated keys are untouched. + assert.Equal(t, "opaque-value", string(out.Data["opaque"])) + // Mutating the output must not affect the input. + out.Data["tls.crt"] = []byte("clobbered") + assert.Equal(t, "PUBLIC-CRT-DATA", string(in.Data["tls.crt"]), + "redactSecret must return a deep copy") + }) + + t.Run("includePrivateKeys disables redaction", func(t *testing.T) { + out := multicluster.ExportRedactSecretForTest(in, true) + assert.Equal(t, "PRIVATE-KEY-DATA", string(out.Data["tls.key"])) + assert.Equal(t, "PEER-CREDS", string(out.Data["kubeconfig.yaml"])) + assert.Empty(t, out.ManagedFields, "managedFields are stripped regardless of includePrivateKeys") + }) +} + +// readZipFiles returns a map of zip-internal path -> contents from a buffer +// containing a complete zip archive. Test helper. +func readZipFiles(t *testing.T, b []byte) map[string][]byte { + t.Helper() + zr, err := zip.NewReader(bytes.NewReader(b), int64(len(b))) + require.NoError(t, err) + out := make(map[string][]byte, len(zr.File)) + for _, f := range zr.File { + rc, err := f.Open() + require.NoError(t, err) + data, err := io.ReadAll(rc) + _ = rc.Close() + require.NoError(t, err) + out[f.Name] = data + } + return out +} + +// buildCacheSecret returns a Secret with the labels written by the +// multicluster operator's writeCachedKubeconfig and the given peer name +// and kubeconfig payload. +func buildCacheSecret(t *testing.T, name, namespace, peerName string, payload []byte) *corev1.Secret { + t.Helper() + labels := map[string]string{ + mcpkg.KubeconfigCacheComponentLabel: mcpkg.KubeconfigCacheComponentValue, + mcpkg.KubeconfigCacheManagedByLabel: mcpkg.KubeconfigCacheManagedByValue, + } + if peerName != "" { + labels[mcpkg.MulticlusterPeerLabel] = peerName + } + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + }, + Data: map[string][]byte{"kubeconfig.yaml": payload}, + } +} + +// peerKubeconfig serialises a kube.Ctl's REST config as kubeconfig YAML +// bytes that LoadKubeconfigFromBytes can parse back into a *rest.Config. +// envtest uses client-cert auth, so we embed the cert/key/CA directly. 
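+//
+// The emitted YAML has the ordinary kubeconfig shape, roughly (values
+// illustrative, data fields base64-elided):
+//
+//	apiVersion: v1
+//	kind: Config
+//	clusters:
+//	- name: peer-a
+//	  cluster:
+//	    server: https://127.0.0.1:6443
+//	    certificate-authority-data: <base64>
+//	users:
+//	- name: peer-a
+//	  user:
+//	    client-certificate-data: <base64>
+//	    client-key-data: <base64>
+//	contexts:
+//	- name: peer-a
+//	  context:
+//	    cluster: peer-a
+//	    user: peer-a
+//	current-context: peer-a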
+func peerKubeconfig(t *testing.T, name string, ctl *kube.Ctl) []byte { + t.Helper() + cfg := ctl.RestConfig() + require.NotNil(t, cfg, "kube.Ctl must expose its REST config") + return restConfigToKubeconfigYAML(t, name, cfg) +} + +func restConfigToKubeconfigYAML(t *testing.T, name string, cfg *rest.Config) []byte { + t.Helper() + kc := clientcmdapi.NewConfig() + kc.Clusters[name] = &clientcmdapi.Cluster{ + Server: cfg.Host, + CertificateAuthorityData: cfg.CAData, + } + kc.AuthInfos[name] = &clientcmdapi.AuthInfo{ + ClientCertificateData: cfg.CertData, + ClientKeyData: cfg.KeyData, + } + kc.Contexts[name] = &clientcmdapi.Context{Cluster: name, AuthInfo: name} + kc.CurrentContext = name + out, err := clientcmd.Write(*kc) + require.NoError(t, err) + return out +} + +// generateTestCert is a small helper to build an x509.Certificate for the +// pure-Go redact / serialisation tests. Currently unused at the test level +// but kept so future tests for writeClusterArtifacts can populate +// CACert/TLSCert without spinning up cert-manager. +// +//nolint:unused // retained for forthcoming serialisation tests +func generateTestCert(t *testing.T, cn string) *x509.Certificate { + t.Helper() + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + tpl := &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{CommonName: cn}, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour), + } + der, err := x509.CreateCertificate(rand.Reader, tpl, tpl, &priv.PublicKey, priv) + require.NoError(t, err) + c, err := x509.ParseCertificate(der) + require.NoError(t, err) + return c +} + +// silence unused-import warnings while the helpers above carry stubs for +// future tests. +var _ = context.Background diff --git a/operator/cmd/rpk-k8s/k8s/multicluster/bundle_write.go b/operator/cmd/rpk-k8s/k8s/multicluster/bundle_write.go new file mode 100644 index 000000000..371eac560 --- /dev/null +++ b/operator/cmd/rpk-k8s/k8s/multicluster/bundle_write.go @@ -0,0 +1,258 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package multicluster + +import ( + "archive/zip" + "crypto/x509" + "encoding/json" + "encoding/pem" + "fmt" + "io" + "path" + "sort" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/redpanda-data/redpanda-operator/operator/cmd/rpk-k8s/k8s/multicluster/checks" +) + +// bundleWriter wraps a zip.Writer with helpers that serialise the structured +// state accumulated by the check pipeline into the bundle file tree. Errors +// from individual writes are returned to the caller so the caller can +// accumulate them in errors.txt rather than failing the whole bundle. +type bundleWriter struct { + zw *zip.Writer +} + +func newBundleWriter(w io.Writer) *bundleWriter { + return &bundleWriter{zw: zip.NewWriter(w)} +} + +func (b *bundleWriter) Close() error { return b.zw.Close() } + +// writeBytes writes `data` to the zip at the given path. Empty paths are +// rejected as a programming error. 
+func (b *bundleWriter) writeBytes(p string, data []byte) error {
+	if p == "" {
+		return fmt.Errorf("bundleWriter: empty path")
+	}
+	f, err := b.zw.Create(p)
+	if err != nil {
+		return fmt.Errorf("creating zip entry %s: %w", p, err)
+	}
+	if _, err := f.Write(data); err != nil {
+		return fmt.Errorf("writing zip entry %s: %w", p, err)
+	}
+	return nil
+}
+
+// writeJSON marshals `v` as indented JSON and writes it.
+func (b *bundleWriter) writeJSON(p string, v any) error {
+	data, err := json.MarshalIndent(v, "", "  ")
+	if err != nil {
+		return fmt.Errorf("marshalling %s: %w", p, err)
+	}
+	return b.writeBytes(p, append(data, '\n'))
+}
+
+// writeYAML marshals `v` as YAML (via sigs.k8s.io/yaml so JSON tags on
+// Kubernetes types work) and writes it.
+func (b *bundleWriter) writeYAML(p string, v any) error {
+	data, err := yaml.Marshal(v)
+	if err != nil {
+		return fmt.Errorf("marshalling %s: %w", p, err)
+	}
+	return b.writeBytes(p, data)
+}
+
+// writeText writes a string verbatim.
+func (b *bundleWriter) writeText(p, s string) error {
+	return b.writeBytes(p, []byte(s))
+}
+
+// bundleManifest is the top-level manifest.json describing the bundle's
+// shape, the contexts that were diagnosed, and the redaction settings used;
+// writeManifestFile emits it. Future bundle readers (and any downstream
+// tooling) need this to interpret the bundle without reverse-engineering
+// its layout.
+type bundleManifest struct {
+	SchemaVersion      int       `json:"schemaVersion"`
+	GeneratedAt        time.Time `json:"generatedAt"`
+	Namespace          string    `json:"namespace"`
+	ServiceName        string    `json:"serviceName"`
+	IncludePrivateKeys bool      `json:"includePrivateKeys"`
+	Clusters           []string  `json:"clusters"`
+	// LogsCollected reports whether operator pod logs were collected on
+	// this run. When true, LogsLimitBytes / LogsTailLines record the caps
+	// applied (0 means no cap).
+	LogsCollected  bool  `json:"logsCollected"`
+	LogsLimitBytes int64 `json:"logsLimitBytes,omitempty"`
+	LogsTailLines  int64 `json:"logsTailLines,omitempty"`
+	// MetricsCollected reports whether operator /metrics scraping was
+	// attempted on this run. False when --skip-metrics was passed; true
+	// otherwise (the actual scrape may still no-op per cluster when the
+	// operator was deployed without --metrics-bind-address).
+	MetricsCollected bool `json:"metricsCollected"`
+	// MetricsSamples records the configured number of /metrics samples
+	// per cluster. 0 when MetricsCollected is false. Stored so a bundle
+	// reader can interpret t<N>_metrics.txt filenames.
+	MetricsSamples int `json:"metricsSamples,omitempty"`
+	// MetricsIntervalSeconds is the configured wall-clock interval
+	// between successive samples, in seconds. 0 when MetricsCollected is
+	// false.
+	MetricsIntervalSeconds float64 `json:"metricsIntervalSeconds,omitempty"`
+}
+
+const bundleSchemaVersion = 1
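+
+// For reference, a manifest.json written with the default flags looks
+// roughly like this (values illustrative, matching the defaults asserted
+// in bundle_test.go):
+//
+//	{
+//	  "schemaVersion": 1,
+//	  "generatedAt": "2026-01-02T03:04:05Z",
+//	  "namespace": "default",
+//	  "serviceName": "operator",
+//	  "includePrivateKeys": false,
+//	  "clusters": ["peer-a", "self"],
+//	  "logsCollected": true,
+//	  "logsLimitBytes": 5242880,
+//	  "logsTailLines": 5000,
+//	  "metricsCollected": true,
+//	  "metricsSamples": 2,
+//	  "metricsIntervalSeconds": 10
+//	}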
+
+func (b *bundleWriter) writeManifestFile(cfg *BundleConfig, contexts []*checks.CheckContext, generatedAt time.Time, logs LogsOptions, metrics MetricsOptions) error {
+	clusters := make([]string, 0, len(contexts))
+	for _, cc := range contexts {
+		clusters = append(clusters, cc.Context)
+	}
+	sort.Strings(clusters)
+	m := bundleManifest{
+		SchemaVersion:      bundleSchemaVersion,
+		GeneratedAt:        generatedAt,
+		Namespace:          cfg.Connection.Namespace,
+		ServiceName:        cfg.Connection.ServiceName,
+		IncludePrivateKeys: cfg.IncludePrivateKeys,
+		Clusters:           clusters,
+		LogsCollected:      !cfg.SkipLogs,
+		MetricsCollected:   !cfg.SkipMetrics,
+	}
+	if !cfg.SkipLogs {
+		m.LogsLimitBytes = logs.LimitBytes
+		m.LogsTailLines = logs.TailLines
+	}
+	if !cfg.SkipMetrics {
+		m.MetricsSamples = metrics.Samples
+		m.MetricsIntervalSeconds = metrics.Interval.Seconds()
+	}
+	return b.writeJSON("manifest.json", m)
+}
+
+// writeStatusTable renders the same human-readable status table that the
+// `status` command prints, plus the issues + cross-cluster sections, into a
+// single status.txt at the bundle root.
+func (b *bundleWriter) writeStatusTable(contexts []*checks.CheckContext, clusterResults [][]checks.Result, crossResults []checks.Result) error {
+	var buf strings.Builder
+	printStatusTable(&buf, contexts)
+	printClusterResults(&buf, contexts, clusterResults)
+	printCrossClusterResults(&buf, crossResults)
+	return b.writeText("status.txt", buf.String())
+}
+
+// writeClusterArtifacts serialises the state accumulated on a single
+// CheckContext (Pod, Deployment, TLS material, raft status) plus its check
+// Results into clusters/<context>/.
+//
+// Any per-artifact serialisation error is returned wrapped; the caller
+// records it in errors.txt and continues with the next cluster so a single
+// bad context can't lose the whole bundle.
+func (b *bundleWriter) writeClusterArtifacts(cc *checks.CheckContext, results []checks.Result, includePrivateKeys bool) []error {
+	root := path.Join("clusters", cc.Context)
+	var errs []error
+	mark := func(err error) {
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	mark(b.writeJSON(path.Join(root, "checks.json"), results))
+
+	if cc.Pod != nil {
+		mark(b.writeYAML(path.Join(root, "pod.yaml"), pruneObjectMeta(cc.Pod)))
+	}
+	if cc.Deployment != nil {
+		mark(b.writeYAML(path.Join(root, "deployment.yaml"), pruneObjectMeta(cc.Deployment)))
+	}
+	if len(cc.DeployArgs) > 0 {
+		mark(b.writeText(path.Join(root, "deploy-args.txt"), strings.Join(cc.DeployArgs, "\n")+"\n"))
+	}
+
+	if cc.CACert != nil {
+		mark(b.writeBytes(path.Join(root, "tls", "ca.crt"), pemEncodeCert(cc.CACert)))
+	}
+	if cc.TLSCert != nil {
+		mark(b.writeBytes(path.Join(root, "tls", "tls.crt"), pemEncodeCert(cc.TLSCert)))
+	}
+	if cc.TLSSecret != nil {
+		mark(b.writeYAML(path.Join(root, "tls", "tls-secret.yaml"), redactSecret(cc.TLSSecret, includePrivateKeys)))
+	}
+	mark(b.writeText(path.Join(root, "tls", "tls-key-match.txt"), fmt.Sprintf("%v\n", cc.TLSKeyMatch)))
+
+	if cc.RaftStatus != nil {
+		mark(b.writeJSON(path.Join(root, "raft-status.json"), cc.RaftStatus))
+	}
+
+	return errs
+}
+
+// writeCrossClusterArtifacts writes the cross-cluster check Results.
+func (b *bundleWriter) writeCrossClusterArtifacts(results []checks.Result) error {
+	return b.writeJSON(path.Join("cross-cluster", "checks.json"), results)
+}
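+
+// Putting the writers together, a fully populated bundle has roughly this
+// layout (the logs/ and metrics/ entries are indicative; their exact
+// filenames come from the log and metrics collectors, not from this file):
+//
+//	manifest.json
+//	status.txt
+//	errors.txt                      (only when collection errors accumulated)
+//	clusters/<context>/checks.json
+//	clusters/<context>/pod.yaml
+//	clusters/<context>/deployment.yaml
+//	clusters/<context>/deploy-args.txt
+//	clusters/<context>/tls/{ca.crt,tls.crt,tls-secret.yaml,tls-key-match.txt}
+//	clusters/<context>/raft-status.json
+//	clusters/<context>/logs/...     (when an operator pod was found)
+//	clusters/<context>/metrics/t<N>_metrics.txt
+//	cross-cluster/checks.json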
+
+// writeErrors writes accumulated non-fatal collection errors to errors.txt.
+// Mirrors the convention from `rpk debug bundle`. Empty input writes nothing.
+func (b *bundleWriter) writeErrors(messages []string) error {
+	if len(messages) == 0 {
+		return nil
+	}
+	return b.writeText("errors.txt", strings.Join(messages, "\n")+"\n")
+}
+
+// pemEncodeCert returns a PEM-encoded CERTIFICATE block for c.
+func pemEncodeCert(c *x509.Certificate) []byte {
+	return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: c.Raw})
+}
+
+// pruneObjectMeta strips noisy server-side metadata — currently just
+// managedFields — from a Kubernetes object before it's serialised into the
+// bundle. The intent is reviewability: managedFields is the bulk of the
+// visual noise in a kubectl get -o yaml dump and adds nothing to a
+// diagnostic.
+func pruneObjectMeta[T client.Object](o T) T {
+	out := o.DeepCopyObject().(T)
+	out.SetManagedFields(nil)
+	// Don't strip ResourceVersion / UID — they can be useful for ticket
+	// cross-referencing. The big offender is managedFields.
+	return out
+}
+
+// redactSecret returns a copy of s with sensitive data keys redacted, unless
+// includePrivateKeys is set. The default redacts:
+//
+//   - tls.key (private key half of an mTLS cert)
+//   - kubeconfig.yaml (peer-cluster credentials cached by the multicluster
+//     operator; their presence here would let a bundle reader access every
+//     peer cluster, defeating the point of running the bundle from a single
+//     starting cluster).
+//
+// The returned object is safe to mutate and to serialise.
+func redactSecret(s *corev1.Secret, includePrivateKeys bool) *corev1.Secret {
+	out := s.DeepCopy()
+	out.ManagedFields = nil
+	if includePrivateKeys {
+		return out
+	}
+	const redacted = "REDACTED"
+	for _, key := range []string{"tls.key", "kubeconfig.yaml"} {
+		if _, ok := out.Data[key]; ok {
+			out.Data[key] = []byte(redacted)
+		}
+	}
+	return out
+}
diff --git a/operator/cmd/rpk-k8s/k8s/multicluster/checks/cluster_tls.go b/operator/cmd/rpk-k8s/k8s/multicluster/checks/cluster_tls.go
index 196473e72..770989338 100644
--- a/operator/cmd/rpk-k8s/k8s/multicluster/checks/cluster_tls.go
+++ b/operator/cmd/rpk-k8s/k8s/multicluster/checks/cluster_tls.go
@@ -30,22 +30,22 @@ type TLSCheck struct{}
 func (c *TLSCheck) Name() string { return "tls" }
 
 func (c *TLSCheck) Run(ctx context.Context, cc *CheckContext) []Result {
+	// Prefer matching the deployment's volume so we read the secret name the
+	// operator was actually configured with. Falls back to a scan-by-suffix
+	// listing — which is the correct last resort because synthesising a name
+	// from SecretPrefix has no relation to how the chart names the secret
+	// (and SecretPrefix may even be a kube context name like
+	// "vcluster_..._k3d-harpoon" with characters illegal in Secret names).
 	secretName := c.findSecretName(cc)
 	if secretName == "" {
-		// Fall back: if we know the expected secret name, try it directly.
-		if cc.SecretPrefix != "" {
-			secretName = cc.SecretPrefix + "-multicluster-certificates"
-		} else {
-			// Last resort: scan all secrets by suffix.
- var secrets corev1.SecretList - if err := cc.Ctl.List(ctx, cc.Namespace, &secrets); err != nil { - return []Result{Fail(c.Name(), fmt.Sprintf("listing secrets: %v", err))} - } - for _, sec := range secrets.Items { - if strings.HasSuffix(sec.Name, "-multicluster-certificates") { - secretName = sec.Name - break - } + var secrets corev1.SecretList + if err := cc.Ctl.List(ctx, cc.Namespace, &secrets); err != nil { + return []Result{Fail(c.Name(), fmt.Sprintf("listing secrets: %v", err))} + } + for _, sec := range secrets.Items { + if strings.HasSuffix(sec.Name, "-multicluster-certificates") { + secretName = sec.Name + break } } } @@ -54,13 +54,11 @@ func (c *TLSCheck) Run(ctx context.Context, cc *CheckContext) []Result { return []Result{Fail(c.Name(), fmt.Sprintf("no multicluster-certificates secret found in namespace %s", cc.Namespace))} } + // Snapshot read — Get, not WaitFor. A debug bundle should never block on + // cert-manager finishing; if the secret is mid-rotation or missing, that + // is itself the diagnostic and Fail() carries it through to checks.json. var secret corev1.Secret - secret.Name = secretName - secret.Namespace = cc.Namespace - err := cc.Ctl.WaitFor(ctx, &secret, func(_ kube.Object, err error) (bool, error) { - return err == nil, nil - }) - if err != nil { + if err := cc.Ctl.Get(ctx, kube.ObjectKey{Namespace: cc.Namespace, Name: secretName}, &secret); err != nil { return []Result{Fail(c.Name(), fmt.Sprintf("cannot read secret %s: %v", secretName, err))} } cc.TLSSecret = &secret diff --git a/operator/cmd/rpk-k8s/k8s/multicluster/export_test.go b/operator/cmd/rpk-k8s/k8s/multicluster/export_test.go new file mode 100644 index 000000000..27cd5c24d --- /dev/null +++ b/operator/cmd/rpk-k8s/k8s/multicluster/export_test.go @@ -0,0 +1,20 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package multicluster + +import ( + corev1 "k8s.io/api/core/v1" +) + +// ExportRedactSecretForTest exposes the package-private redactSecret to the +// external _test package. Test-only. 
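+//
+// Used from the external test package as, for example:
+//
+//	out := multicluster.ExportRedactSecretForTest(secret, false)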
+func ExportRedactSecretForTest(s *corev1.Secret, includePrivateKeys bool) *corev1.Secret { + return redactSecret(s, includePrivateKeys) +} diff --git a/operator/cmd/rpk-k8s/k8s/multicluster/multicluster.go b/operator/cmd/rpk-k8s/k8s/multicluster/multicluster.go index 328ebcb91..26b906ea0 100644 --- a/operator/cmd/rpk-k8s/k8s/multicluster/multicluster.go +++ b/operator/cmd/rpk-k8s/k8s/multicluster/multicluster.go @@ -22,6 +22,7 @@ func Command() *cobra.Command { cmd.AddCommand( bootstrapCommand(), + bundleCommand(), statusCommand(), ) diff --git a/operator/go.mod b/operator/go.mod index 53cdca6ea..20e653a8e 100644 --- a/operator/go.mod +++ b/operator/go.mod @@ -10,6 +10,7 @@ require ( github.com/cert-manager/cert-manager v1.19.3 github.com/cisco-open/k8s-objectmatcher v1.9.0 github.com/cockroachdb/errors v1.12.0 + github.com/docker/go-units v0.5.0 github.com/fluxcd/pkg/runtime v0.43.3 github.com/fsnotify/fsnotify v1.9.0 github.com/go-logr/logr v1.4.3 @@ -148,7 +149,6 @@ require ( github.com/distribution/reference v0.6.0 // indirect github.com/docker/docker v28.5.2+incompatible // indirect github.com/docker/go-connections v0.6.0 // indirect - github.com/docker/go-units v0.5.0 // indirect github.com/ebitengine/purego v0.9.1 // indirect github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/evanphx/json-patch v5.9.11+incompatible // indirect diff --git a/operator/internal/lifecycle/testdata/stretch-cluster-cases.resources.golden.txtar b/operator/internal/lifecycle/testdata/stretch-cluster-cases.resources.golden.txtar index bc3786c0b..169174343 100644 --- a/operator/internal/lifecycle/testdata/stretch-cluster-cases.resources.golden.txtar +++ b/operator/internal/lifecycle/testdata/stretch-cluster-cases.resources.golden.txtar @@ -1371,6 +1371,23 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: "" + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: compat-test + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: compat-test + cluster.redpanda.com/owner: compat-test + name: compat-test-compat-test-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -1392,6 +1409,26 @@ - kind: ServiceAccount name: compat-test namespace: compat-test +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: "" + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: compat-test + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: compat-test + cluster.redpanda.com/owner: compat-test + name: compat-test-compat-test-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: compat-test-compat-test-metrics-reader + subjects: + - kind: ServiceAccount + name: compat-test + namespace: compat-test - apiVersion: v1 kind: Secret metadata: @@ -2124,6 +2161,23 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: "" + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: flat-network-test + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: flat-network-test + cluster.redpanda.com/owner: 
flat-network-test + name: flat-network-test-flat-network-test-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -2145,6 +2199,26 @@ - kind: ServiceAccount name: flat-network-test namespace: flat-network-test +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: "" + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: flat-network-test + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: flat-network-test + cluster.redpanda.com/owner: flat-network-test + name: flat-network-test-flat-network-test-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flat-network-test-flat-network-test-metrics-reader + subjects: + - kind: ServiceAccount + name: flat-network-test + namespace: flat-network-test - apiVersion: v1 kind: Secret metadata: @@ -2864,6 +2938,23 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: "" + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: mcs-network-test + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: mcs-network-test + cluster.redpanda.com/owner: mcs-network-test + name: mcs-network-test-mcs-network-test-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -2885,6 +2976,26 @@ - kind: ServiceAccount name: mcs-network-test namespace: mcs-network-test +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: "" + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: mcs-network-test + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: mcs-network-test + cluster.redpanda.com/owner: mcs-network-test + name: mcs-network-test-mcs-network-test-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: mcs-network-test-mcs-network-test-metrics-reader + subjects: + - kind: ServiceAccount + name: mcs-network-test + namespace: mcs-network-test - apiVersion: v1 kind: Secret metadata: @@ -3724,6 +3835,23 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: "" + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: nodepool-basic-test + cluster.redpanda.com/owner: nodepool-basic-test + name: nodepool-basic-test-nodepool-basic-test-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -3745,6 +3873,26 @@ - kind: ServiceAccount name: nodepool-basic-test namespace: nodepool-basic-test +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: "" + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: nodepool-basic-test + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + cluster.redpanda.com/namespace: 
nodepool-basic-test + cluster.redpanda.com/owner: nodepool-basic-test + name: nodepool-basic-test-nodepool-basic-test-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nodepool-basic-test-nodepool-basic-test-metrics-reader + subjects: + - kind: ServiceAccount + name: nodepool-basic-test + namespace: nodepool-basic-test - apiVersion: v1 kind: Secret metadata: diff --git a/operator/multicluster/rbac.go b/operator/multicluster/rbac.go index 9baa0cffd..1f8ad14af 100644 --- a/operator/multicluster/rbac.go +++ b/operator/multicluster/rbac.go @@ -95,6 +95,31 @@ func clusterRoles(state *RenderState) []*rbacv1.ClusterRole { var clusterRoles []*rbacv1.ClusterRole + // Self-metrics ClusterRole: grants nonResourceURLs:["/metrics"] get. + // controller-runtime's metrics server enforces auth+authz on /metrics + // by default, so anything authenticating as the operator SA — the + // bundled ServiceMonitor scraping with the pod's projected token, and + // `rpk k8s multicluster bundle` scraping with a TokenRequest-minted + // SA token — needs this rule. The clusterRoleBindings function below + // binds every emitted ClusterRole to the operator SA, so this entry + // alone is enough to unblock both consumers. + clusterRoles = append(clusterRoles, &rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRole", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: tplutil.CleanForK8s(fmt.Sprintf("%s-%s-metrics-reader", state.fullname(), state.namespace)), + Labels: state.commonLabels(), + }, + Rules: []rbacv1.PolicyRule{ + { + NonResourceURLs: []string{"/metrics"}, + Verbs: []string{"get"}, + }, + }, + }) + if state.Spec().RackAwareness.IsEnabled() { clusterRoles = append(clusterRoles, &rbacv1.ClusterRole{ TypeMeta: metav1.TypeMeta{ diff --git a/operator/multicluster/testdata/render-cases.resources.golden.txtar b/operator/multicluster/testdata/render-cases.resources.golden.txtar index 5c528b4e9..f39a5f19a 100644 --- a/operator/multicluster/testdata/render-cases.resources.golden.txtar +++ b/operator/multicluster/testdata/render-cases.resources.golden.txtar @@ -467,6 +467,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: audit-logging + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: audit-logging-audit-logging-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -486,6 +501,24 @@ - kind: ServiceAccount name: audit-logging namespace: audit-logging +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: audit-logging + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: audit-logging-audit-logging-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: audit-logging-audit-logging-metrics-reader + subjects: + - kind: ServiceAccount + name: audit-logging + namespace: audit-logging - apiVersion: v1 kind: Secret metadata: @@ -1237,6 +1270,23 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + 
app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: common-labels + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + env: staging + team: platform + name: common-labels-common-labels-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -1258,6 +1308,26 @@ - kind: ServiceAccount name: common-labels namespace: common-labels +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: common-labels + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + env: staging + team: platform + name: common-labels-common-labels-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: common-labels-common-labels-metrics-reader + subjects: + - kind: ServiceAccount + name: common-labels + namespace: common-labels - apiVersion: v1 kind: Secret metadata: @@ -1997,6 +2067,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: custom-cluster-domain + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: custom-cluster-domain-custom-cluster-domain-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -2016,6 +2101,24 @@ - kind: ServiceAccount name: custom-cluster-domain namespace: custom-cluster-domain +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: custom-cluster-domain + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: custom-cluster-domain-custom-cluster-domain-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: custom-cluster-domain-custom-cluster-domain-metrics-reader + subjects: + - kind: ServiceAccount + name: custom-cluster-domain + namespace: custom-cluster-domain - apiVersion: v1 kind: Secret metadata: @@ -2746,6 +2849,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: custom-config + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: custom-config-custom-config-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -2765,6 +2883,24 @@ - kind: ServiceAccount name: custom-config namespace: custom-config +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: custom-config + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: custom-config-custom-config-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: custom-config-custom-config-metrics-reader + subjects: + - kind: ServiceAccount + name: custom-config + 
namespace: custom-config - apiVersion: v1 kind: Secret metadata: @@ -3494,6 +3630,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: custom-image + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: custom-image-custom-image-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -3513,6 +3664,24 @@ - kind: ServiceAccount name: custom-image namespace: custom-image +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: custom-image + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: custom-image-custom-image-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: custom-image-custom-image-metrics-reader + subjects: + - kind: ServiceAccount + name: custom-image + namespace: custom-image - apiVersion: v1 kind: Secret metadata: @@ -4242,6 +4411,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: custom-resources + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: custom-resources-custom-resources-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -4261,6 +4445,24 @@ - kind: ServiceAccount name: custom-resources namespace: custom-resources +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: custom-resources + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: custom-resources-custom-resources-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: custom-resources-custom-resources-metrics-reader + subjects: + - kind: ServiceAccount + name: custom-resources + namespace: custom-resources - apiVersion: v1 kind: Secret metadata: @@ -4990,6 +5192,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: custom-resources-explicit + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: custom-resources-explicit-custom-resources-explicit-metrics-rea + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -5009,6 +5226,24 @@ - kind: ServiceAccount name: custom-resources-explicit namespace: custom-resources-explicit +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: custom-resources-explicit + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: 
custom-resources-explicit-custom-resources-explicit-metrics-rea + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: custom-resources-explicit-custom-resources-explicit-metrics-rea + subjects: + - kind: ServiceAccount + name: custom-resources-explicit + namespace: custom-resources-explicit - apiVersion: v1 kind: Secret metadata: @@ -5738,6 +5973,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: enterprise + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: enterprise-enterprise-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -5757,6 +6007,24 @@ - kind: ServiceAccount name: enterprise namespace: enterprise +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: enterprise + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: enterprise-enterprise-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: enterprise-enterprise-metrics-reader + subjects: + - kind: ServiceAccount + name: enterprise + namespace: enterprise - apiVersion: v1 kind: Secret metadata: @@ -6368,6 +6636,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: external-loadbalancer + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: external-loadbalancer-external-loadbalancer-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -6387,6 +6670,24 @@ - kind: ServiceAccount name: external-loadbalancer namespace: external-loadbalancer +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: external-loadbalancer + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: external-loadbalancer-external-loadbalancer-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: external-loadbalancer-external-loadbalancer-metrics-reader + subjects: + - kind: ServiceAccount + name: external-loadbalancer + namespace: external-loadbalancer - apiVersion: v1 kind: Service metadata: @@ -7231,6 +7532,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: external-nodeport + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: external-nodeport-external-nodeport-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -7250,6 +7566,24 @@ - kind: ServiceAccount name: external-nodeport namespace: external-nodeport +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + 
labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: external-nodeport + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: external-nodeport-external-nodeport-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: external-nodeport-external-nodeport-metrics-reader + subjects: + - kind: ServiceAccount + name: external-nodeport + namespace: external-nodeport - apiVersion: v1 kind: Secret metadata: @@ -7885,6 +8219,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: external-tls + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: external-tls-external-tls-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -7904,6 +8253,24 @@ - kind: ServiceAccount name: external-tls namespace: external-tls +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: external-tls + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: external-tls-external-tls-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: external-tls-external-tls-metrics-reader + subjects: + - kind: ServiceAccount + name: external-tls + namespace: external-tls - apiVersion: v1 kind: Service metadata: @@ -8746,6 +9113,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: flat-network + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: flat-network-flat-network-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -8765,6 +9147,24 @@ - kind: ServiceAccount name: flat-network namespace: flat-network +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: flat-network + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: flat-network-flat-network-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flat-network-flat-network-metrics-reader + subjects: + - kind: ServiceAccount + name: flat-network + namespace: flat-network - apiVersion: v1 kind: Secret metadata: @@ -9765,6 +10165,22 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: full-featured + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + team: data-platform + name: full-featured-full-featured-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -9805,6 +10221,25 @@ - kind: ServiceAccount name: full-featured 
namespace: full-featured +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: full-featured + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + team: data-platform + name: full-featured-full-featured-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: full-featured-full-featured-metrics-reader + subjects: + - kind: ServiceAccount + name: full-featured + namespace: full-featured - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -10982,6 +11417,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: init-containers + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: init-containers-init-containers-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -11001,6 +11451,24 @@ - kind: ServiceAccount name: init-containers namespace: init-containers +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: init-containers + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: init-containers-init-containers-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: init-containers-init-containers-metrics-reader + subjects: + - kind: ServiceAccount + name: init-containers + namespace: init-containers - apiVersion: v1 kind: Secret metadata: @@ -11780,6 +12248,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: mcs-network + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: mcs-network-mcs-network-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -11799,6 +12282,24 @@ - kind: ServiceAccount name: mcs-network namespace: mcs-network +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: mcs-network + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: mcs-network-mcs-network-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: mcs-network-mcs-network-metrics-reader + subjects: + - kind: ServiceAccount + name: mcs-network + namespace: mcs-network - apiVersion: v1 kind: Secret metadata: @@ -12510,6 +13011,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: memory-locking + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: memory-locking-memory-locking-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: 
+ - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -12529,6 +13045,24 @@ - kind: ServiceAccount name: memory-locking namespace: memory-locking +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: memory-locking + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: memory-locking-memory-locking-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: memory-locking-memory-locking-metrics-reader + subjects: + - kind: ServiceAccount + name: memory-locking + namespace: memory-locking - apiVersion: v1 kind: Secret metadata: @@ -13258,6 +13792,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: minimal + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: minimal-minimal-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -13277,6 +13826,24 @@ - kind: ServiceAccount name: minimal namespace: minimal +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: minimal + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: minimal-minimal-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: minimal-minimal-metrics-reader + subjects: + - kind: ServiceAccount + name: minimal + namespace: minimal - apiVersion: v1 kind: Secret metadata: @@ -14032,6 +14599,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: monitoring + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: monitoring-monitoring-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -14051,6 +14633,24 @@ - kind: ServiceAccount name: monitoring namespace: monitoring +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: monitoring + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: monitoring-monitoring-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: monitoring-monitoring-metrics-reader + subjects: + - kind: ServiceAccount + name: monitoring + namespace: monitoring - apiVersion: v1 kind: Secret metadata: @@ -14996,7 +15596,22 @@ - list - watch - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: multi-pool + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: multi-pool-multi-pool-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get +- 
apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding metadata: labels: app.kubernetes.io/cluster-name: test @@ -15014,6 +15629,24 @@ - kind: ServiceAccount name: multi-pool namespace: multi-pool +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: multi-pool + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: multi-pool-multi-pool-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: multi-pool-multi-pool-metrics-reader + subjects: + - kind: ServiceAccount + name: multi-pool + namespace: multi-pool - apiVersion: v1 kind: Secret metadata: @@ -15850,6 +16483,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: per-pod-service-overrides + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: per-pod-service-overrides-per-pod-service-overrides-metrics-rea + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -15869,6 +16517,24 @@ - kind: ServiceAccount name: per-pod-service-overrides namespace: per-pod-service-overrides +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: per-pod-service-overrides + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: per-pod-service-overrides-per-pod-service-overrides-metrics-rea + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: per-pod-service-overrides-per-pod-service-overrides-metrics-rea + subjects: + - kind: ServiceAccount + name: per-pod-service-overrides + namespace: per-pod-service-overrides - apiVersion: v1 kind: Secret metadata: @@ -16492,6 +17158,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: per-pod-service-remote-disabled + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: per-pod-service-remote-disabled-per-pod-service-remote-disabled + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -16511,6 +17192,24 @@ - kind: ServiceAccount name: per-pod-service-remote-disabled namespace: per-pod-service-remote-disabled +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: per-pod-service-remote-disabled + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: per-pod-service-remote-disabled-per-pod-service-remote-disabled + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: per-pod-service-remote-disabled-per-pod-service-remote-disabled + subjects: + - kind: ServiceAccount + name: per-pod-service-remote-disabled + namespace: per-pod-service-remote-disabled - apiVersion: v1 kind: Secret metadata: @@ -17151,6 +17850,21 @@ - get - list - 
watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: rack-awareness + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: rack-awareness-rack-awareness-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -17189,6 +17903,24 @@ - kind: ServiceAccount name: rack-awareness namespace: rack-awareness +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: rack-awareness + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: rack-awareness-rack-awareness-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rack-awareness-rack-awareness-metrics-reader + subjects: + - kind: ServiceAccount + name: rack-awareness + namespace: rack-awareness - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -17946,6 +18678,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: sasl-scram256 + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: sasl-scram256-sasl-scram256-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -17965,6 +18712,24 @@ - kind: ServiceAccount name: sasl-scram256 namespace: sasl-scram256 +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: sasl-scram256 + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: sasl-scram256-sasl-scram256-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: sasl-scram256-sasl-scram256-metrics-reader + subjects: + - kind: ServiceAccount + name: sasl-scram256 + namespace: sasl-scram256 - apiVersion: v1 kind: Secret metadata: @@ -18713,6 +19478,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: sasl-scram512-with-tls + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: sasl-scram512-with-tls-sasl-scram512-with-tls-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -18732,6 +19512,24 @@ - kind: ServiceAccount name: sasl-scram512-with-tls namespace: sasl-scram512-with-tls +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: sasl-scram512-with-tls + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: sasl-scram512-with-tls-sasl-scram512-with-tls-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
sasl-scram512-with-tls-sasl-scram512-with-tls-metrics-reader + subjects: + - kind: ServiceAccount + name: sasl-scram512-with-tls + namespace: sasl-scram512-with-tls - apiVersion: v1 kind: Secret metadata: @@ -19456,6 +20254,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: single-replica + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: single-replica-single-replica-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -19475,6 +20288,24 @@ - kind: ServiceAccount name: single-replica namespace: single-replica +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: single-replica + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: single-replica-single-replica-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: single-replica-single-replica-metrics-reader + subjects: + - kind: ServiceAccount + name: single-replica + namespace: single-replica - apiVersion: v1 kind: Secret metadata: @@ -20115,6 +20946,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: storage-hostpath + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: storage-hostpath-storage-hostpath-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -20134,6 +20980,24 @@ - kind: ServiceAccount name: storage-hostpath namespace: storage-hostpath +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: storage-hostpath + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: storage-hostpath-storage-hostpath-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: storage-hostpath-storage-hostpath-metrics-reader + subjects: + - kind: ServiceAccount + name: storage-hostpath + namespace: storage-hostpath - apiVersion: v1 kind: Secret metadata: @@ -20863,6 +21727,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: storage-pv-custom + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: storage-pv-custom-storage-pv-custom-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -20882,6 +21761,24 @@ - kind: ServiceAccount name: storage-pv-custom namespace: storage-pv-custom +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: storage-pv-custom + 
app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: storage-pv-custom-storage-pv-custom-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: storage-pv-custom-storage-pv-custom-metrics-reader + subjects: + - kind: ServiceAccount + name: storage-pv-custom + namespace: storage-pv-custom - apiVersion: v1 kind: Secret metadata: @@ -21611,6 +22508,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: tiered-storage-emptydir + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: tiered-storage-emptydir-tiered-storage-emptydir-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -21630,6 +22542,24 @@ - kind: ServiceAccount name: tiered-storage-emptydir namespace: tiered-storage-emptydir +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: tiered-storage-emptydir + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: tiered-storage-emptydir-tiered-storage-emptydir-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tiered-storage-emptydir-tiered-storage-emptydir-metrics-reader + subjects: + - kind: ServiceAccount + name: tiered-storage-emptydir + namespace: tiered-storage-emptydir - apiVersion: v1 kind: Secret metadata: @@ -22359,6 +23289,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: tiered-storage-hostpath + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: tiered-storage-hostpath-tiered-storage-hostpath-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -22378,6 +23323,24 @@ - kind: ServiceAccount name: tiered-storage-hostpath namespace: tiered-storage-hostpath +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: tiered-storage-hostpath + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: tiered-storage-hostpath-tiered-storage-hostpath-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tiered-storage-hostpath-tiered-storage-hostpath-metrics-reader + subjects: + - kind: ServiceAccount + name: tiered-storage-hostpath + namespace: tiered-storage-hostpath - apiVersion: v1 kind: Secret metadata: @@ -23107,6 +24070,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: tiered-storage-pv + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: tiered-storage-pv-tiered-storage-pv-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: 
rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -23126,6 +24104,24 @@ - kind: ServiceAccount name: tiered-storage-pv namespace: tiered-storage-pv +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: tiered-storage-pv + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: tiered-storage-pv-tiered-storage-pv-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tiered-storage-pv-tiered-storage-pv-metrics-reader + subjects: + - kind: ServiceAccount + name: tiered-storage-pv + namespace: tiered-storage-pv - apiVersion: v1 kind: Secret metadata: @@ -23824,6 +24820,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: tls-issuer-ref + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: tls-issuer-ref-tls-issuer-ref-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -23843,6 +24854,24 @@ - kind: ServiceAccount name: tls-issuer-ref namespace: tls-issuer-ref +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: tls-issuer-ref + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: tls-issuer-ref-tls-issuer-ref-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tls-issuer-ref-tls-issuer-ref-metrics-reader + subjects: + - kind: ServiceAccount + name: tls-issuer-ref + namespace: tls-issuer-ref - apiVersion: v1 kind: Secret metadata: @@ -24574,6 +25603,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: tls-issuer-ref-mtls + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: tls-issuer-ref-mtls-tls-issuer-ref-mtls-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -24593,6 +25637,24 @@ - kind: ServiceAccount name: tls-issuer-ref-mtls namespace: tls-issuer-ref-mtls +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: tls-issuer-ref-mtls + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: tls-issuer-ref-mtls-tls-issuer-ref-mtls-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tls-issuer-ref-mtls-tls-issuer-ref-mtls-metrics-reader + subjects: + - kind: ServiceAccount + name: tls-issuer-ref-mtls + namespace: tls-issuer-ref-mtls - apiVersion: v1 kind: Secret metadata: @@ -25355,6 +26417,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: tls-mtls + 
app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: tls-mtls-tls-mtls-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -25374,6 +26451,24 @@ - kind: ServiceAccount name: tls-mtls namespace: tls-mtls +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: tls-mtls + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: tls-mtls-tls-mtls-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tls-mtls-tls-mtls-metrics-reader + subjects: + - kind: ServiceAccount + name: tls-mtls + namespace: tls-mtls - apiVersion: v1 kind: Secret metadata: @@ -26103,6 +27198,21 @@ - get - list - watch +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: tls-self-signed + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: tls-self-signed-tls-self-signed-metrics-reader + rules: + - nonResourceURLs: + - /metrics + verbs: + - get - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -26122,6 +27232,24 @@ - kind: ServiceAccount name: tls-self-signed namespace: tls-self-signed +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app.kubernetes.io/cluster-name: test + app.kubernetes.io/component: redpanda + app.kubernetes.io/instance: tls-self-signed + app.kubernetes.io/managed-by: redpanda-operator + app.kubernetes.io/name: redpanda + name: tls-self-signed-tls-self-signed-metrics-reader + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tls-self-signed-tls-self-signed-metrics-reader + subjects: + - kind: ServiceAccount + name: tls-self-signed + namespace: tls-self-signed - apiVersion: v1 kind: Secret metadata: diff --git a/pkg/multicluster/load.go b/pkg/multicluster/load.go index b214faf40..556eb06a9 100644 --- a/pkg/multicluster/load.go +++ b/pkg/multicluster/load.go @@ -16,16 +16,22 @@ import ( "k8s.io/client-go/tools/clientcmd" ) -func loadKubeconfig(file string) (*rest.Config, error) { +// LoadKubeconfig reads a kubeconfig file from disk and returns its +// CurrentContext as a *rest.Config. +func LoadKubeconfig(file string) (*rest.Config, error) { kubeconfigYAML, err := os.ReadFile(file) if err != nil { return nil, err } - return loadKubeconfigFromBytes(kubeconfigYAML) + return LoadKubeconfigFromBytes(kubeconfigYAML) } -func loadKubeconfigFromBytes(kubeconfigYAML []byte) (*rest.Config, error) { +// LoadKubeconfigFromBytes parses kubeconfig YAML bytes and returns the +// CurrentContext as a *rest.Config. Used both by the operator's own +// raft-bootstrap flow (where the bytes come from a cached Secret) and by +// external tools that read those Secrets directly. 
+func LoadKubeconfigFromBytes(kubeconfigYAML []byte) (*rest.Config, error) { kubeconfig, err := clientcmd.Load(kubeconfigYAML) if err != nil { return nil, err } diff --git a/pkg/multicluster/raft.go b/pkg/multicluster/raft.go index e8eb9e8b0..98bc178e7 100644 --- a/pkg/multicluster/raft.go +++ b/pkg/multicluster/raft.go @@ -55,14 +55,55 @@ func stringToHash(s string) uint64 { return h.Sum64() } -// kubeconfigCacheSecretName returns the name of the Secret used to cache a -// peer's kubeconfig in the local cluster. -func kubeconfigCacheSecretName(kubeconfigName, peerName string) string { +// Labels applied to peer-kubeconfig cache Secrets so external tools can +// discover them by label selector instead of having to know the operator's +// configured `--kubeconfig-name` prefix. The same label set is written by +// every CreateOrUpdate of a cache Secret. +const ( + // KubeconfigCacheComponentLabel is the standard component label key used + // to select cache Secrets. Pair with KubeconfigCacheComponentValue. + KubeconfigCacheComponentLabel = "app.kubernetes.io/component" + // KubeconfigCacheComponentValue identifies a Secret as a peer-kubeconfig + // cache entry written by the multicluster operator. + KubeconfigCacheComponentValue = "multicluster-kubeconfig-cache" + // KubeconfigCacheManagedByLabel is the standard managed-by label key. Set + // to KubeconfigCacheManagedByValue so `kubectl get secret -L + // app.kubernetes.io/managed-by` shows ownership at a glance. + KubeconfigCacheManagedByLabel = "app.kubernetes.io/managed-by" + // KubeconfigCacheManagedByValue identifies the writer of cache Secrets. + KubeconfigCacheManagedByValue = "redpanda-multicluster-operator" + // MulticlusterPeerLabel records the raft peer name a cache Secret holds a + // kubeconfig for. Lets readers recover the peer name without parsing the + // Secret's name (which depends on the operator's --kubeconfig-name flag). + MulticlusterPeerLabel = "cluster.redpanda.com/multicluster-peer" +) + +// KubeconfigCacheSecretName returns the name of the Secret used to cache a +// peer's kubeconfig in the local cluster. The format is +// "<kubeconfigName>-<peerName>", where kubeconfigName is the operator's +// configured prefix (RaftConfiguration.KubeconfigName / --kubeconfig-name). +func KubeconfigCacheSecretName(kubeconfigName, peerName string) string { return kubeconfigName + "-" + peerName } -// writeCachedKubeconfig writes kubeconfig bytes as a Secret in the local cluster. -func writeCachedKubeconfig(ctx context.Context, cl client.Client, name, namespace string, data []byte) error { +// kubeconfigCacheSecretLabels returns the label set applied to every peer +// kubeconfig cache Secret. peerName is recorded in MulticlusterPeerLabel so +// callers (e.g. the operator-bundle subcommand) can discover the secret by +// component selector and recover the peer name without parsing the Secret's +// name. +func kubeconfigCacheSecretLabels(peerName string) map[string]string { + return map[string]string{ + KubeconfigCacheComponentLabel: KubeconfigCacheComponentValue, + KubeconfigCacheManagedByLabel: KubeconfigCacheManagedByValue, + MulticlusterPeerLabel: peerName, + } +} + +// writeCachedKubeconfig writes kubeconfig bytes as a Secret in the local +// cluster. The Secret is labelled so external tools can discover all peer +// caches with a single label selector rather than having to know the +// operator's --kubeconfig-name prefix.
+func writeCachedKubeconfig(ctx context.Context, cl client.Client, name, namespace, peerName string, data []byte) error { secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -70,6 +111,15 @@ func writeCachedKubeconfig(ctx context.Context, cl client.Client, name, namespac }, } _, err := controllerutil.CreateOrUpdate(ctx, cl, secret, func() error { + // Re-apply on every reconcile so labels self-heal if a Secret was + // created by an older operator version, or if a user removed labels + // out of band. + if secret.Labels == nil { + secret.Labels = map[string]string{} + } + for k, v := range kubeconfigCacheSecretLabels(peerName) { + secret.Labels[k] = v + } secret.Data = map[string][]byte{"kubeconfig.yaml": data} return nil }) @@ -116,7 +166,7 @@ func (s *startupKubeconfigFetcher) Start(ctx context.Context) error { var failed []int for _, i := range pending { peer := s.config.Peers[i] - secretName := kubeconfigCacheSecretName(s.config.KubeconfigName, peer.Name) + secretName := KubeconfigCacheSecretName(s.config.KubeconfigName, peer.Name) s.logger.Info("startup: fetching kubeconfig for peer", "peer", peer.Name) grpcClient, err := leaderelection.ClientFor(s.raftConfig, s.raftConfig.Peers[i]) if err != nil { @@ -130,7 +180,7 @@ func (s *startupKubeconfigFetcher) Start(ctx context.Context) error { failed = append(failed, i) continue } - if err := writeCachedKubeconfig(ctx, s.client, secretName, s.config.KubeconfigNamespace, response.Payload); err != nil { + if err := writeCachedKubeconfig(ctx, s.client, secretName, s.config.KubeconfigNamespace, peer.Name, response.Payload); err != nil { s.logger.Error(err, "startup: failed to cache kubeconfig", "peer", peer.Name) failed = append(failed, i) continue @@ -344,7 +394,7 @@ func NewRaftRuntimeManager(config *RaftConfiguration) (Manager, error) { } if peer.KubeconfigFile != "" { - kubeConfig, err := loadKubeconfig(peer.KubeconfigFile) + kubeConfig, err := LoadKubeconfig(peer.KubeconfigFile) if err != nil { return nil, err } @@ -482,7 +532,7 @@ func NewRaftRuntimeManager(config *RaftConfiguration) (Manager, error) { if peer.Name != config.Name && peer.KubeconfigFile == "" && peer.Kubeconfig == nil { config.Logger.Info("registering leader routine", "peer", peer.Name) lockManager.RegisterRoutine(func(ctx context.Context) error { - secretName := kubeconfigCacheSecretName(config.KubeconfigName, peer.Name) + secretName := KubeconfigCacheSecretName(config.KubeconfigName, peer.Name) // Try the local secret cache first so that a new raft leader // can reconnect to peers without requiring a live gRPC call. 
@@ -504,13 +554,13 @@ func NewRaftRuntimeManager(config *RaftConfiguration) (Manager, error) { return err } kubeconfigBytes = response.Payload - if cacheErr := writeCachedKubeconfig(ctx, localClient, secretName, config.KubeconfigNamespace, kubeconfigBytes); cacheErr != nil { + if cacheErr := writeCachedKubeconfig(ctx, localClient, secretName, config.KubeconfigNamespace, peer.Name, kubeconfigBytes); cacheErr != nil { config.Logger.Error(cacheErr, "caching kubeconfig for peer", "peer", peer.Name) } } config.Logger.Info("loading kubeconfig for peer", "peer", peer.Name) - kubeConfig, err := loadKubeconfigFromBytes(kubeconfigBytes) + kubeConfig, err := LoadKubeconfigFromBytes(kubeconfigBytes) if err != nil { config.Logger.Error(err, "loading kubeconfig for peer", "peer", peer.Name) return err diff --git a/pkg/multicluster/raft_bootstrap_test.go b/pkg/multicluster/raft_bootstrap_test.go index f05357bd8..bc604a7fc 100644 --- a/pkg/multicluster/raft_bootstrap_test.go +++ b/pkg/multicluster/raft_bootstrap_test.go @@ -58,7 +58,7 @@ func (n *bootstrapNode) stop(t *testing.T) { } // restConfigToKubeconfigBytes serialises a REST config into kubeconfig YAML -// bytes that loadKubeconfigFromBytes can later parse back into a *rest.Config. +// bytes that LoadKubeconfigFromBytes can later parse back into a *rest.Config. // Envtest uses client-cert auth, so we embed the cert/key/CA directly. func restConfigToKubeconfigBytes(name string, cfg *rest.Config) ([]byte, error) { kc := clientcmdapi.NewConfig() @@ -250,8 +250,8 @@ func TestIntegrationKubeconfigCaching(t *testing.T) { continue } secretName := kubeconfigSecretName(kubeconfigName, peer.name) + var secret corev1.Secret require.Eventually(t, func() bool { - var secret corev1.Secret err := node.client.Get(ctx, types.NamespacedName{ Name: secretName, Namespace: kubeconfigNamespace, @@ -264,6 +264,16 @@ func TestIntegrationKubeconfigCaching(t *testing.T) { "node %s should have cached kubeconfig secret %s for peer %s", node.name, secretName, peer.name) t.Logf("node %s: kubeconfig secret %s/%s is present", node.name, kubeconfigNamespace, secretName) + + // Cache secrets must carry the discovery labels so external tools + // (e.g. the operator-bundle subcommand) can find them by selector + // rather than by parsing the operator's --kubeconfig-name prefix. + require.Equal(t, multicluster.KubeconfigCacheComponentValue, secret.Labels[multicluster.KubeconfigCacheComponentLabel], + "cache secret %s missing %s label", secretName, multicluster.KubeconfigCacheComponentLabel) + require.Equal(t, multicluster.KubeconfigCacheManagedByValue, secret.Labels[multicluster.KubeconfigCacheManagedByLabel], + "cache secret %s missing %s label", secretName, multicluster.KubeconfigCacheManagedByLabel) + require.Equal(t, peer.name, secret.Labels[multicluster.MulticlusterPeerLabel], + "cache secret %s should record peer name in %s label", secretName, multicluster.MulticlusterPeerLabel) } }
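
Reviewer note: a minimal consumer-side sketch of what the labels and exported helpers above enable — given credentials for any one peer, list that cluster's labelled kubeconfig cache Secrets and turn each into a *rest.Config. The peerConfigs helper name and the github.com/redpanda-data/redpanda-operator/pkg/multicluster import path are illustrative assumptions, not part of this change; the label constants, LoadKubeconfigFromBytes, and the "kubeconfig.yaml" data key are the ones introduced in this diff.

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"

	// Assumed module path for this repository's pkg/multicluster.
	"github.com/redpanda-data/redpanda-operator/pkg/multicluster"
)

// peerConfigs discovers every peer-kubeconfig cache Secret in ns using the
// label selector written by writeCachedKubeconfig, and returns one
// *rest.Config per peer, keyed by the name stored in MulticlusterPeerLabel.
func peerConfigs(ctx context.Context, cl client.Client, ns string) (map[string]*rest.Config, error) {
	var secrets corev1.SecretList
	if err := cl.List(ctx, &secrets,
		client.InNamespace(ns),
		client.MatchingLabels{
			multicluster.KubeconfigCacheComponentLabel: multicluster.KubeconfigCacheComponentValue,
			multicluster.KubeconfigCacheManagedByLabel: multicluster.KubeconfigCacheManagedByValue,
		},
	); err != nil {
		return nil, err
	}
	configs := make(map[string]*rest.Config, len(secrets.Items))
	for _, s := range secrets.Items {
		peer := s.Labels[multicluster.MulticlusterPeerLabel]
		data, ok := s.Data["kubeconfig.yaml"] // key written by writeCachedKubeconfig
		if !ok {
			return nil, fmt.Errorf("cache secret %s/%s has no kubeconfig.yaml key", s.Namespace, s.Name)
		}
		cfg, err := multicluster.LoadKubeconfigFromBytes(data)
		if err != nil {
			return nil, fmt.Errorf("peer %s: %w", peer, err)
		}
		configs[peer] = cfg
	}
	return configs, nil
}

Matching on the component label alone would also work; selecting on managed-by as well guards against unrelated Secrets that happen to reuse the generic component value.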