diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml index 582ac9bc6b937..6b3d8d8ee4dec 100644 --- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -152,7 +152,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: docker.io/grafana/loki-operator:0.8.0 - createdAt: "2025-09-12T11:03:05Z" + createdAt: "2025-09-29T10:55:29Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. features.operators.openshift.io/disconnected: "true" diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml index 382c6a5357f5d..21ca49bceb499 100644 --- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml @@ -152,7 +152,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: docker.io/grafana/loki-operator:0.8.0 - createdAt: "2025-09-12T11:03:03Z" + createdAt: "2025-09-29T10:55:27Z" description: The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. 
operators.operatorframework.io/builder: operator-sdk-unknown diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml index 1517f77d03907..2dfe6a5cd4a56 100644 --- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -152,7 +152,7 @@ metadata: categories: OpenShift Optional, Logging & Tracing certified: "false" containerImage: quay.io/openshift-logging/loki-operator:0.1.0 - createdAt: "2025-09-12T11:03:07Z" + createdAt: "2025-09-29T10:55:30Z" description: | The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging. ## Prerequisites and Requirements diff --git a/operator/cmd/loki-operator/main.go b/operator/cmd/loki-operator/main.go index 5327c510fda4f..ef279a2b2d3f8 100644 --- a/operator/cmd/loki-operator/main.go +++ b/operator/cmd/loki-operator/main.go @@ -110,7 +110,7 @@ func main() { os.Exit(1) } - if ctrlCfg.Gates.ServiceMonitors && ctrlCfg.Gates.OpenShift.Enabled && ctrlCfg.Gates.OpenShift.Dashboards { + if ctrlCfg.Gates.OpenShift.Enabled { var ns string ns, err = operator.GetNamespace() if err != nil { @@ -118,13 +118,14 @@ func main() { os.Exit(1) } - if err = (&lokictrl.DashboardsReconciler{ + if err = (&lokictrl.ClusterScopeReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Log: logger.WithName("controllers").WithName(lokictrl.ControllerNameLokiDashboards), + Log: logger.WithName("controllers").WithName(lokictrl.ControllerNameLokiClusterScope), OperatorNs: ns, + Dashboards: ctrlCfg.Gates.ServiceMonitors && ctrlCfg.Gates.OpenShift.Dashboards, }).SetupWithManager(mgr); err != nil { - logger.Error(err, "unable to create controller", "controller", lokictrl.ControllerNameLokiDashboards) + logger.Error(err, "unable to create controller", "controller", 
lokictrl.ControllerNameLokiClusterScope) os.Exit(1) } } diff --git a/operator/internal/controller/loki/dashboards_controller.go b/operator/internal/controller/loki/lokistack_cluster_scoped_controller.go similarity index 74% rename from operator/internal/controller/loki/dashboards_controller.go rename to operator/internal/controller/loki/lokistack_cluster_scoped_controller.go index 4a4602a704f04..666d3d9439090 100644 --- a/operator/internal/controller/loki/dashboards_controller.go +++ b/operator/internal/controller/loki/lokistack_cluster_scoped_controller.go @@ -18,7 +18,7 @@ import ( "github.com/grafana/loki/operator/internal/handlers" ) -const ControllerNameLokiDashboards = "loki-dashboards" +const ControllerNameLokiClusterScope = "loki-cluster-scope" var createOrDeletesPred = builder.WithPredicates(predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { return false }, @@ -27,18 +27,19 @@ var createOrDeletesPred = builder.WithPredicates(predicate.Funcs{ GenericFunc: func(e event.GenericEvent) bool { return false }, }) -// DashboardsReconciler deploys and removes the cluster-global resources needed -// for the metrics dashboards depending on whether any LokiStacks exist. -type DashboardsReconciler struct { +// ClusterScopeReconciler deploys and removes the cluster-global resources needed +// for the metrics dashboards and RBAC resources depending on whether any LokiStacks exist. +type ClusterScopeReconciler struct { client.Client Scheme *runtime.Scheme Log logr.Logger OperatorNs string + Dashboards bool } // Reconcile creates all LokiStack dashboard ConfigMap and PrometheusRule objects on OpenShift clusters when // the at least one LokiStack custom resource exists or removes all when none. 
-func (r *DashboardsReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *ClusterScopeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { var stacks lokiv1.LokiStackList if err := r.List(ctx, &stacks, client.MatchingLabelsSelector{Selector: labels.Everything()}); err != nil { return ctrl.Result{}, kverrors.Wrap(err, "failed to list any lokistack instances") @@ -47,7 +48,7 @@ func (r *DashboardsReconciler) Reconcile(ctx context.Context, req ctrl.Request) if len(stacks.Items) == 0 { // Removes all LokiStack dashboard resources on OpenShift clusters when // the last LokiStack custom resource is deleted. - if err := handlers.DeleteDashboards(ctx, r.Client, r.OperatorNs); err != nil { + if err := handlers.DeleteClusterScopedResources(ctx, r.Client, r.OperatorNs); err != nil { return ctrl.Result{}, kverrors.Wrap(err, "failed to delete dashboard resources") } return ctrl.Result{}, nil @@ -55,16 +56,16 @@ func (r *DashboardsReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Creates all LokiStack dashboard resources on OpenShift clusters when // the first LokiStack custom resource is created. - if err := handlers.CreateDashboards(ctx, r.Log, r.OperatorNs, r.Client, r.Scheme); err != nil { + if err := handlers.CreateClusterScopedResources(ctx, r.Log, r.Dashboards, r.OperatorNs, r.Client, r.Scheme, stacks.Items); err != nil { return ctrl.Result{}, kverrors.Wrap(err, "failed to create dashboard resources", "req", req) } return ctrl.Result{}, nil } // SetupWithManager sets up the controller with the Manager to only call this controller on create/delete/generic events. -func (r *DashboardsReconciler) SetupWithManager(mgr manager.Manager) error { +func (r *ClusterScopeReconciler) SetupWithManager(mgr manager.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&lokiv1.LokiStack{}, createOrDeletesPred). - Named(ControllerNameLokiDashboards). + Named(ControllerNameLokiClusterScope). 
Complete(r) } diff --git a/operator/internal/handlers/dashboards_create.go b/operator/internal/handlers/dashboards_create.go deleted file mode 100644 index af5339f10f598..0000000000000 --- a/operator/internal/handlers/dashboards_create.go +++ /dev/null @@ -1,52 +0,0 @@ -package handlers - -import ( - "context" - "fmt" - - "github.com/ViaQ/logerr/v2/kverrors" - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" //nolint:typecheck - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - - "github.com/grafana/loki/operator/internal/external/k8s" - "github.com/grafana/loki/operator/internal/manifests" - "github.com/grafana/loki/operator/internal/manifests/openshift" -) - -// CreateDashboards handles the LokiStack dashboards create events. -func CreateDashboards(ctx context.Context, log logr.Logger, operatorNs string, k k8s.Client, s *runtime.Scheme) error { - objs, err := openshift.BuildDashboards(operatorNs) - if err != nil { - return kverrors.Wrap(err, "failed to build dashboard manifests") - } - - var errCount int32 - for _, obj := range objs { - desired := obj.DeepCopyObject().(client.Object) - mutateFn := manifests.MutateFuncFor(obj, desired, nil) - - op, err := ctrl.CreateOrUpdate(ctx, k, obj, mutateFn) - if err != nil { - log.Error(err, "failed to configure resource") - errCount++ - continue - } - - msg := fmt.Sprintf("Resource has been %s", op) - switch op { - case ctrlutil.OperationResultNone: - log.V(1).Info(msg) - default: - log.Info(msg) - } - } - - if errCount > 0 { - return kverrors.New("failed to configure lokistack dashboard resources") - } - - return nil -} diff --git a/operator/internal/handlers/dashboards_delete.go b/operator/internal/handlers/dashboards_delete.go deleted file mode 100644 index 5573aa69de273..0000000000000 --- a/operator/internal/handlers/dashboards_delete.go +++ /dev/null @@ -1,31 +0,0 @@ -package handlers - -import ( 
- "context" - - "github.com/ViaQ/logerr/v2/kverrors" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/grafana/loki/operator/internal/external/k8s" - "github.com/grafana/loki/operator/internal/manifests/openshift" -) - -// DeleteDashboards removes all cluster-scoped dashboard resources. -func DeleteDashboards(ctx context.Context, k k8s.Client, operatorNs string) error { - objs, err := openshift.BuildDashboards(operatorNs) - if err != nil { - return kverrors.Wrap(err, "failed to build dashboards manifests") - } - - for _, obj := range objs { - key := client.ObjectKeyFromObject(obj) - if err := k.Delete(ctx, obj, &client.DeleteOptions{}); err != nil { - if apierrors.IsNotFound(err) { - continue - } - return kverrors.Wrap(err, "failed to delete dashboard", "key", key) - } - } - return nil -} diff --git a/operator/internal/handlers/lokistack_cluster_scope_resources_create.go b/operator/internal/handlers/lokistack_cluster_scope_resources_create.go new file mode 100644 index 0000000000000..c5e7776d7ac58 --- /dev/null +++ b/operator/internal/handlers/lokistack_cluster_scope_resources_create.go @@ -0,0 +1,87 @@ +package handlers + +import ( + "context" + "fmt" + + "github.com/ViaQ/logerr/v2/kverrors" + "github.com/go-logr/logr" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" //nolint:typecheck + ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + lokiv1 "github.com/grafana/loki/operator/api/loki/v1" + "github.com/grafana/loki/operator/internal/external/k8s" + "github.com/grafana/loki/operator/internal/manifests" + "github.com/grafana/loki/operator/internal/manifests/openshift" +) + +// CreateClusterScopedResources handles the LokiStack cluster scoped create events. 
+func CreateClusterScopedResources(ctx context.Context, log logr.Logger, dashboards bool, operatorNs string, k k8s.Client, s *runtime.Scheme, stacks []lokiv1.LokiStack) error { + // This has to be done here as to not introduce a circular dependency. + rulerSubjects := make([]rbacv1.Subject, 0, len(stacks)) + for _, stack := range stacks { + rulerSubjects = append(rulerSubjects, rbacv1.Subject{ + Kind: "ServiceAccount", + Name: manifests.RulerName(stack.Name), + Namespace: stack.Namespace, + }) + } + opts := openshift.NewOptionsClusterScope(operatorNs, manifests.ClusterScopeLabels(), rulerSubjects) + + objs := openshift.BuildRBAC(opts) + if dashboards { + objs = append(objs, openshift.BuildDashboards(opts.OperatorNs)...) + } + + var errCount int32 + for _, obj := range objs { + desired := obj.DeepCopyObject().(client.Object) + mutateFn := manifests.MutateFuncFor(obj, desired, nil) + + op, err := ctrl.CreateOrUpdate(ctx, k, obj, mutateFn) + if err != nil { + log.Error(err, "failed to configure resource") + errCount++ + continue + } + + msg := fmt.Sprintf("Resource has been %s", op) + switch op { + case ctrlutil.OperationResultNone: + log.V(1).Info(msg) + default: + log.Info(msg) + } + } + + if errCount > 0 { + return kverrors.New("failed to configure lokistack cluster-scoped resources") + } + + // Delete legacy RBAC resources + // This needs to live here and not in DeleteClusterScopedResources as we want to + // delete the legacy RBAC resources when LokiStack is reconciled and not on delete. + var legacyObjs []client.Object + for _, stack := range stacks { + // This name would clash with the new cluster-scoped resources. Skip it. + if stack.Name == "lokistack" { + continue + } + legacyObjs = append(legacyObjs, openshift.LegacyRBAC(manifests.GatewayName(stack.Name), manifests.RulerName(stack.Name))...) 
+ } + for _, obj := range legacyObjs { + key := client.ObjectKeyFromObject(obj) + if err := k.Delete(ctx, obj, &client.DeleteOptions{}); err != nil { + if apierrors.IsNotFound(err) { + continue + } + return kverrors.Wrap(err, "failed to delete resource", "kind", obj.GetObjectKind(), "key", key) + } + } + + return nil +} diff --git a/operator/internal/handlers/dashboards_create_test.go b/operator/internal/handlers/lokistack_cluster_scope_resources_create_test.go similarity index 93% rename from operator/internal/handlers/dashboards_create_test.go rename to operator/internal/handlers/lokistack_cluster_scope_resources_create_test.go index 71001c4760bd7..5f077577bb14f 100644 --- a/operator/internal/handlers/dashboards_create_test.go +++ b/operator/internal/handlers/lokistack_cluster_scope_resources_create_test.go @@ -53,7 +53,7 @@ func TestCreateDashboards_ReturnsResourcesInManagedNamespaces(t *testing.T) { k.StatusStub = func() client.StatusWriter { return sw } - err := CreateDashboards(context.TODO(), logger, "test", k, scheme) + err := CreateClusterScopedResources(context.Background(), logger, true, "test", k, scheme, []lokiv1.LokiStack{stack}) require.NoError(t, err) // make sure create was called diff --git a/operator/internal/handlers/lokistack_cluster_scope_resources_delete.go b/operator/internal/handlers/lokistack_cluster_scope_resources_delete.go new file mode 100644 index 0000000000000..ab541be629fa2 --- /dev/null +++ b/operator/internal/handlers/lokistack_cluster_scope_resources_delete.go @@ -0,0 +1,33 @@ +package handlers + +import ( + "context" + + "github.com/ViaQ/logerr/v2/kverrors" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/grafana/loki/operator/internal/external/k8s" + "github.com/grafana/loki/operator/internal/manifests" + "github.com/grafana/loki/operator/internal/manifests/openshift" +) + +// DeleteClusterScopedResources removes all cluster-scoped 
resources. +func DeleteClusterScopedResources(ctx context.Context, k k8s.Client, operatorNs string) error { + // Since we are deleting we don't need to worry about the subjects. + opts := openshift.NewOptionsClusterScope(operatorNs, manifests.ClusterScopeLabels(), []rbacv1.Subject{}) + + objs := openshift.BuildRBAC(opts) + objs = append(objs, openshift.BuildDashboards(opts.OperatorNs)...) + + for _, obj := range objs { + if err := k.Delete(ctx, obj, &client.DeleteOptions{}); err != nil { + if apierrors.IsNotFound(err) { + continue + } + return kverrors.Wrap(err, "failed to delete cluster-scoped resource", "kind", obj.GetObjectKind(), "key", client.ObjectKeyFromObject(obj)) + } + } + return nil +} diff --git a/operator/internal/handlers/dashboards_delete_test.go b/operator/internal/handlers/lokistack_cluster_scope_resources_delete_test.go similarity index 60% rename from operator/internal/handlers/dashboards_delete_test.go rename to operator/internal/handlers/lokistack_cluster_scope_resources_delete_test.go index ee05f16adf571..6685a12005a97 100644 --- a/operator/internal/handlers/dashboards_delete_test.go +++ b/operator/internal/handlers/lokistack_cluster_scope_resources_delete_test.go @@ -13,23 +13,24 @@ import ( "github.com/grafana/loki/operator/internal/manifests/openshift" ) -func TestDeleteDashboards(t *testing.T) { - objs, err := openshift.BuildDashboards("operator-ns") - require.NoError(t, err) +func TestDeleteClusterScopedResources(t *testing.T) { + opts := openshift.NewOptionsClusterScope("operator-ns", nil, nil) + objs := openshift.BuildRBAC(opts) + objs = append(objs, openshift.BuildDashboards(opts.OperatorNs)...) 
k := &k8sfakes.FakeClient{} - err = DeleteDashboards(context.TODO(), k, "operator-ns") + err := DeleteClusterScopedResources(context.Background(), k, "operator-ns") require.NoError(t, err) require.Equal(t, k.DeleteCallCount(), len(objs)) } -func TestDeleteDashboards_ReturnsNoError_WhenNotFound(t *testing.T) { +func TestDeleteClusterScopedResources_ReturnsNoError_WhenNotFound(t *testing.T) { k := &k8sfakes.FakeClient{} k.DeleteStub = func(context.Context, client.Object, ...client.DeleteOption) error { return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") } - err := DeleteDashboards(context.TODO(), k, "operator-ns") + err := DeleteClusterScopedResources(context.Background(), k, "operator-ns") require.NoError(t, err) } diff --git a/operator/internal/manifests/gateway_tenants.go b/operator/internal/manifests/gateway_tenants.go index 72aef1c824d47..200f1ff78de4c 100644 --- a/operator/internal/manifests/gateway_tenants.go +++ b/operator/internal/manifests/gateway_tenants.go @@ -151,8 +151,6 @@ func configureGatewayObjsForMode(objs []client.Object, opts Options) []client.Ob } } - openShiftObjs := openshift.BuildGatewayTenantModeObjects(opts.OpenShiftOptions) - objs = append(objs, openShiftObjs...) 
} return objs diff --git a/operator/internal/manifests/gateway_tenants_test.go b/operator/internal/manifests/gateway_tenants_test.go index 57f17e7f31daf..bf478c51f8bea 100644 --- a/operator/internal/manifests/gateway_tenants_test.go +++ b/operator/internal/manifests/gateway_tenants_test.go @@ -729,6 +729,7 @@ func TestConfigureDeploymentForMode(t *testing.T) { "--web.healthchecks.url=http://localhost:8082", "--opa.skip-tenants=audit,infrastructure", "--opa.package=lokistack", + "--opa.ssar=true", "--opa.admin-groups=system:cluster-admins,cluster-admin,dedicated-admin", "--opa.matcher=kubernetes_namespace_name,k8s_namespace_name", "--opa.viaq-to-otel-migration=true", @@ -839,6 +840,7 @@ func TestConfigureDeploymentForMode(t *testing.T) { "--web.healthchecks.url=http://localhost:8082", "--opa.skip-tenants=audit,infrastructure", "--opa.package=lokistack", + "--opa.ssar=true", "--opa.admin-groups=system:cluster-admins,cluster-admin,dedicated-admin", "--opa.matcher=kubernetes_namespace_name,k8s_namespace_name", "--opa.viaq-to-otel-migration=true", @@ -955,6 +957,7 @@ func TestConfigureDeploymentForMode(t *testing.T) { "--web.healthchecks.url=http://localhost:8082", "--opa.skip-tenants=audit,infrastructure", "--opa.package=lokistack", + "--opa.ssar=true", "--opa.admin-groups=system:cluster-admins,cluster-admin,dedicated-admin", "--opa.matcher=SrcK8S_Namespace,DstK8S_Namespace", "--opa.matcher-op=or", @@ -1062,6 +1065,7 @@ func TestConfigureDeploymentForMode(t *testing.T) { "--web.healthchecks.url=http://localhost:8082", "--opa.skip-tenants=audit,infrastructure", "--opa.package=lokistack", + "--opa.ssar=true", "--opa.admin-groups=system:cluster-admins,cluster-admin,dedicated-admin", "--opa.matcher=SrcK8S_Namespace,DstK8S_Namespace", "--opa.matcher-op=or", @@ -1177,6 +1181,7 @@ func TestConfigureDeploymentForMode(t *testing.T) { "--web.healthchecks.url=http://localhost:8082", "--opa.skip-tenants=audit,infrastructure", "--opa.package=lokistack", + "--opa.ssar=true", 
"--opa.admin-groups=custom-admins,other-admins", "--opa.matcher=kubernetes_namespace_name,k8s_namespace_name", "--opa.viaq-to-otel-migration=true", @@ -1275,6 +1280,7 @@ func TestConfigureDeploymentForMode(t *testing.T) { "--web.healthchecks.url=http://localhost:8082", "--opa.skip-tenants=audit,infrastructure", "--opa.package=lokistack", + "--opa.ssar=true", "--opa.matcher=kubernetes_namespace_name,k8s_namespace_name", "--opa.viaq-to-otel-migration=true", `--openshift.mappings=application=loki.grafana.com`, diff --git a/operator/internal/manifests/gateway_test.go b/operator/internal/manifests/gateway_test.go index 0bd637bc0c12b..87169c756f267 100644 --- a/operator/internal/manifests/gateway_test.go +++ b/operator/internal/manifests/gateway_test.go @@ -303,7 +303,7 @@ func TestBuildGateway_HasExtraObjectsForTenantMode(t *testing.T) { }) require.NoError(t, err) - require.Len(t, objs, 13) + require.Len(t, objs, 11) } func TestBuildGateway_WithExtraObjectsForTenantMode_RouteSvcMatches(t *testing.T) { @@ -987,7 +987,7 @@ func TestBuildGateway_PodDisruptionBudget(t *testing.T) { } objs, err := BuildGateway(opts) require.NoError(t, err) - require.Len(t, objs, 13) + require.Len(t, objs, 11) pdb := objs[6].(*policyv1.PodDisruptionBudget) require.NotNil(t, pdb) diff --git a/operator/internal/manifests/internal/gateway/build_test.go b/operator/internal/manifests/internal/gateway/build_test.go index 2b089f464ed08..2b53d191e8793 100644 --- a/operator/internal/manifests/internal/gateway/build_test.go +++ b/operator/internal/manifests/internal/gateway/build_test.go @@ -326,6 +326,7 @@ tenants: serviceAccount: lokistack-gateway redirectURL: https://localhost:8443/openshift/application/callback cookieSecret: abcd + ssrEnabled: true opa: url: http://127.0.0.1:8080/v1/data/lokistack/allow withAccessToken: true @@ -335,6 +336,7 @@ tenants: serviceAccount: lokistack-gateway redirectURL: https://localhost:8443/openshift/infrastructure/callback cookieSecret: efgh + ssrEnabled: true opa: 
url: http://127.0.0.1:8080/v1/data/lokistack/allow withAccessToken: true @@ -344,6 +346,7 @@ tenants: serviceAccount: lokistack-gateway redirectURL: https://localhost:8443/openshift/audit/callback cookieSecret: deadbeef + ssrEnabled: true opa: url: http://127.0.0.1:8080/v1/data/lokistack/allow withAccessToken: true @@ -428,6 +431,7 @@ tenants: serviceAccount: lokistack-gateway redirectURL: https://localhost:8443/openshift/network/callback cookieSecret: whynot + ssrEnabled: true opa: url: http://127.0.0.1:8080/v1/data/lokistack/allow withAccessToken: true diff --git a/operator/internal/manifests/internal/gateway/gateway-tenants.yaml b/operator/internal/manifests/internal/gateway/gateway-tenants.yaml index aed6870231f60..5a13466a7e3ab 100644 --- a/operator/internal/manifests/internal/gateway/gateway-tenants.yaml +++ b/operator/internal/manifests/internal/gateway/gateway-tenants.yaml @@ -77,6 +77,7 @@ tenants: serviceAccount: {{ $spec.ServiceAccount }} redirectURL: {{ $spec.RedirectURL }} cookieSecret: {{ $spec.CookieSecret }} + ssrEnabled: true opa: url: {{ $l.OpenShiftOptions.Authorization.OPAUrl }} withAccessToken: true diff --git a/operator/internal/manifests/openshift/build.go b/operator/internal/manifests/openshift/build.go index 0a6a1bab531be..e2dcedb4a98f1 100644 --- a/operator/internal/manifests/openshift/build.go +++ b/operator/internal/manifests/openshift/build.go @@ -15,24 +15,11 @@ func BuildGatewayObjects(opts Options) []client.Object { } } -// BuildGatewayTenantModeObjects returns a list of auxiliary openshift/k8s objects -// for lokistack gateway deployments on OpenShift for tenant modes: -// - openshift-logging -// - openshift-network -func BuildGatewayTenantModeObjects(opts Options) []client.Object { - return []client.Object{ - BuildGatewayClusterRole(opts), - BuildGatewayClusterRoleBinding(opts), - } -} - // BuildRulerObjects returns a list of auxiliary openshift/k8s objects // for lokistack ruler deployments on OpenShift. 
func BuildRulerObjects(opts Options) []client.Object { return []client.Object{ BuildAlertManagerCAConfigMap(opts), BuildRulerServiceAccount(opts), - BuildRulerClusterRole(opts), - BuildRulerClusterRoleBinding(opts), } } diff --git a/operator/internal/manifests/openshift/build_test.go b/operator/internal/manifests/openshift/build_test.go index 5ce0cba1473c2..0ce1ff403c0a4 100644 --- a/operator/internal/manifests/openshift/build_test.go +++ b/operator/internal/manifests/openshift/build_test.go @@ -6,24 +6,9 @@ import ( "time" "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - - lokiv1 "github.com/grafana/loki/operator/api/loki/v1" ) -func TestBuildGatewayTenantModeObjects_ClusterRoleRefMatches(t *testing.T) { - opts := NewOptions("abc", "ns", "abc", "abc", "abc", 1*time.Minute, map[string]string{}, "abc"). - WithTenantsForMode(lokiv1.OpenshiftLogging, "example.com", map[string]TenantData{}) - - objs := BuildGatewayTenantModeObjects(*opts) - cr := objs[0].(*rbacv1.ClusterRole) - rb := objs[1].(*rbacv1.ClusterRoleBinding) - - require.Equal(t, cr.Kind, rb.RoleRef.Kind) - require.Equal(t, cr.Name, rb.RoleRef.Name) -} - func TestBuildGatewayObjects_MonitoringClusterRoleRefMatches(t *testing.T) { opts := NewOptions("abc", "ns", "abc", "abc", "abc", 1*time.Minute, map[string]string{}, "abc") @@ -49,18 +34,3 @@ func TestBuildGatewayObjets_RouteWithTimeoutAnnotation(t *testing.T) { want := fmt.Sprintf("%.fs", routeTimeout.Seconds()) require.Equal(t, want, got) } - -func TestBuildRulerObjects_ClusterRoleRefMatches(t *testing.T) { - opts := NewOptions("abc", "ns", "abc", "abc", "abc", 1*time.Minute, map[string]string{}, "abc") - - objs := BuildRulerObjects(*opts) - sa := objs[1].(*corev1.ServiceAccount) - cr := objs[2].(*rbacv1.ClusterRole) - rb := objs[3].(*rbacv1.ClusterRoleBinding) - - require.Equal(t, sa.Kind, rb.Subjects[0].Kind) - require.Equal(t, sa.Name, rb.Subjects[0].Name) - require.Equal(t, sa.Namespace, 
rb.Subjects[0].Namespace) - require.Equal(t, cr.Kind, rb.RoleRef.Kind) - require.Equal(t, cr.Name, rb.RoleRef.Name) -} diff --git a/operator/internal/manifests/openshift/dashboards.go b/operator/internal/manifests/openshift/dashboards.go index de476e3202c78..be43be675683f 100644 --- a/operator/internal/manifests/openshift/dashboards.go +++ b/operator/internal/manifests/openshift/dashboards.go @@ -16,7 +16,7 @@ const ( managedConfigNamespace = "openshift-config-managed" ) -func BuildDashboards(operatorNs string) ([]client.Object, error) { +func BuildDashboards(operatorNamespace string) []client.Object { ds, rules := dashboards.Content() var objs []client.Object @@ -24,13 +24,10 @@ func BuildDashboards(operatorNs string) ([]client.Object, error) { objs = append(objs, newDashboardConfigMap(name, content)) } - promRule, err := newDashboardPrometheusRule(operatorNs, rules) - if err != nil { - return nil, err - } + promRule := newDashboardPrometheusRule(operatorNamespace, rules) objs = append(objs, promRule) - return objs, nil + return objs } func newDashboardConfigMap(filename string, content []byte) *corev1.ConfigMap { @@ -54,7 +51,7 @@ func newDashboardConfigMap(filename string, content []byte) *corev1.ConfigMap { } } -func newDashboardPrometheusRule(namespace string, spec *monitoringv1.PrometheusRuleSpec) (*monitoringv1.PrometheusRule, error) { +func newDashboardPrometheusRule(namespace string, spec *monitoringv1.PrometheusRuleSpec) *monitoringv1.PrometheusRule { return &monitoringv1.PrometheusRule{ TypeMeta: metav1.TypeMeta{ Kind: "PrometheusRule", @@ -65,5 +62,5 @@ func newDashboardPrometheusRule(namespace string, spec *monitoringv1.PrometheusR Namespace: namespace, }, Spec: *spec, - }, nil + } } diff --git a/operator/internal/manifests/openshift/dashboards_test.go b/operator/internal/manifests/openshift/dashboards_test.go index 8850715435975..d25e3390b04d5 100644 --- a/operator/internal/manifests/openshift/dashboards_test.go +++ 
b/operator/internal/manifests/openshift/dashboards_test.go @@ -9,8 +9,8 @@ import ( ) func TestBuildDashboards_ReturnsDashboardConfigMaps(t *testing.T) { - objs, err := BuildDashboards("test") - require.NoError(t, err) + opts := NewOptionsClusterScope("test", nil, nil) + objs := BuildDashboards(opts.OperatorNs) for _, d := range objs { switch d.(type) { @@ -22,8 +22,8 @@ func TestBuildDashboards_ReturnsDashboardConfigMaps(t *testing.T) { } func TestBuildDashboards_ReturnsPrometheusRules(t *testing.T) { - objs, err := BuildDashboards("test") - require.NoError(t, err) + opts := NewOptionsClusterScope("test", nil, nil) + objs := BuildDashboards(opts.OperatorNs) rules := objs[len(objs)-1].(*monitoringv1.PrometheusRule) require.Equal(t, rules.GetName(), dashboardPrometheusRulesName) diff --git a/operator/internal/manifests/openshift/opa_openshift.go b/operator/internal/manifests/openshift/opa_openshift.go index fefaaf4b00db8..0835d1fa848a7 100644 --- a/operator/internal/manifests/openshift/opa_openshift.go +++ b/operator/internal/manifests/openshift/opa_openshift.go @@ -45,6 +45,7 @@ func newOPAOpenShiftContainer(mode lokiv1.ModeType, secretVolumeName, tlsDir, mi fmt.Sprintf("--web.healthchecks.url=http://localhost:%d", GatewayOPAHTTPPort), "--opa.skip-tenants=audit,infrastructure", fmt.Sprintf("--opa.package=%s", opaDefaultPackage), + "--opa.ssar=true", } if len(adminGroups) > 0 { diff --git a/operator/internal/manifests/openshift/options_cluster_scope.go b/operator/internal/manifests/openshift/options_cluster_scope.go new file mode 100644 index 0000000000000..240f3f583173b --- /dev/null +++ b/operator/internal/manifests/openshift/options_cluster_scope.go @@ -0,0 +1,19 @@ +package openshift + +import ( + rbacv1 "k8s.io/api/rbac/v1" +) + +type ClusterScopeOptions struct { + OperatorNs string + RulerSubjects []rbacv1.Subject + Labels map[string]string +} + +func NewOptionsClusterScope(operatorNs string, labels map[string]string, rulerSubjects []rbacv1.Subject) 
*ClusterScopeOptions { + return &ClusterScopeOptions{ + OperatorNs: operatorNs, + RulerSubjects: rulerSubjects, + Labels: labels, + } +} diff --git a/operator/internal/manifests/openshift/rbac.go b/operator/internal/manifests/openshift/rbac.go index 46e5837a2c262..5ab0133c1148a 100644 --- a/operator/internal/manifests/openshift/rbac.go +++ b/operator/internal/manifests/openshift/rbac.go @@ -3,90 +3,66 @@ package openshift import ( rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// BuildGatewayClusterRole returns a k8s ClusterRole object for the -// lokistack gateway serviceaccount to allow creating: -// - TokenReviews to authenticate the user by bearer token. -// - SubjectAccessReview to authorize the user by bearer token. -// if having access to read/create logs. -func BuildGatewayClusterRole(opts Options) *rbacv1.ClusterRole { - return &rbacv1.ClusterRole{ - TypeMeta: metav1.TypeMeta{ - Kind: "ClusterRole", - APIVersion: rbacv1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: authorizerRbacName(opts.BuildOpts.GatewayName), - Labels: opts.BuildOpts.Labels, - }, - Rules: []rbacv1.PolicyRule{ - { - APIGroups: []string{ - "authentication.k8s.io", - }, - Resources: []string{ - "tokenreviews", - }, - Verbs: []string{ - "create", - }, +const ( + gatewayName = "lokistack-gateway" + rulerName = "lokistack-ruler" +) + +func BuildRBAC(opts *ClusterScopeOptions) []client.Object { + objs := make([]client.Object, 0, 2) + objs = append(objs, buildRulerClusterRole(opts.Labels)) + objs = append(objs, buildRulerClusterRoleBinding(opts.Labels, opts.RulerSubjects)) + return objs +} + +func LegacyRBAC(gatewayName, rulerName string) []client.Object { + objs := make([]client.Object, 0, 4) + + clusterrole := func(name string) *rbacv1.ClusterRole { + return &rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterRole", + APIVersion: rbacv1.SchemeGroupVersion.String(), }, - { - 
APIGroups: []string{ - "authorization.k8s.io", - }, - Resources: []string{ - "subjectaccessreviews", - }, - Verbs: []string{ - "create", - }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, }, - }, + } } -} - -// BuildGatewayClusterRoleBinding returns a k8s ClusterRoleBinding object for -// the lokistack gateway serviceaccount to grant access to: -// - rbac.authentication.k8s.io/TokenReviews -// - rbac.authorization.k8s.io/SubjectAccessReviews -func BuildGatewayClusterRoleBinding(opts Options) *rbacv1.ClusterRoleBinding { - return &rbacv1.ClusterRoleBinding{ - TypeMeta: metav1.TypeMeta{ - Kind: "ClusterRoleBinding", - APIVersion: rbacv1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: authorizerRbacName(opts.BuildOpts.GatewayName), - Labels: opts.BuildOpts.Labels, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: authorizerRbacName(opts.BuildOpts.GatewayName), - }, - Subjects: []rbacv1.Subject{ - { - Kind: rbacv1.ServiceAccountKind, - Name: gatewayServiceAccountName(opts), - Namespace: opts.BuildOpts.LokiStackNamespace, + clusterrolebinding := func(name string) *rbacv1.ClusterRoleBinding { + return &rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterRoleBinding", + APIVersion: rbacv1.SchemeGroupVersion.String(), }, - }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } } + + objs = append(objs, clusterrole(authorizerRbacName(gatewayName))) + objs = append(objs, clusterrolebinding(authorizerRbacName(gatewayName))) + objs = append(objs, clusterrole(authorizerRbacName(rulerName))) + objs = append(objs, clusterrolebinding(authorizerRbacName(rulerName))) + + return objs } -// BuildRulerClusterRole returns a k8s ClusterRole object for the +// buildRulerClusterRole returns a k8s ClusterRole object for the // lokistack ruler serviceaccount to allow patching sending alerts to alertmanagers. 
-func BuildRulerClusterRole(opts Options) *rbacv1.ClusterRole { +func buildRulerClusterRole(labels map[string]string) *rbacv1.ClusterRole { return &rbacv1.ClusterRole{ TypeMeta: metav1.TypeMeta{ Kind: "ClusterRole", APIVersion: rbacv1.SchemeGroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ - Name: authorizerRbacName(opts.BuildOpts.RulerName), - Labels: opts.BuildOpts.Labels, + Name: authorizerRbacName(rulerName), + Labels: labels, }, Rules: []rbacv1.PolicyRule{ { @@ -123,29 +99,23 @@ func BuildRulerClusterRole(opts Options) *rbacv1.ClusterRole { } } -// BuildRulerClusterRoleBinding returns a k8s ClusterRoleBinding object for +// buildRulerClusterRoleBinding returns a k8s ClusterRoleBinding object for // the lokistack ruler serviceaccount to grant access to alertmanagers. -func BuildRulerClusterRoleBinding(opts Options) *rbacv1.ClusterRoleBinding { +func buildRulerClusterRoleBinding(labels map[string]string, subjects []rbacv1.Subject) *rbacv1.ClusterRoleBinding { return &rbacv1.ClusterRoleBinding{ TypeMeta: metav1.TypeMeta{ Kind: "ClusterRoleBinding", APIVersion: rbacv1.SchemeGroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ - Name: authorizerRbacName(opts.BuildOpts.RulerName), - Labels: opts.BuildOpts.Labels, + Name: authorizerRbacName(rulerName), + Labels: labels, }, RoleRef: rbacv1.RoleRef{ APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", - Name: authorizerRbacName(opts.BuildOpts.RulerName), - }, - Subjects: []rbacv1.Subject{ - { - Kind: rbacv1.ServiceAccountKind, - Name: rulerServiceAccountName(opts), - Namespace: opts.BuildOpts.LokiStackNamespace, - }, + Name: authorizerRbacName(rulerName), }, + Subjects: subjects, } } diff --git a/operator/internal/manifests/openshift/rbac_test.go b/operator/internal/manifests/openshift/rbac_test.go new file mode 100644 index 0000000000000..8a2afb9c4146b --- /dev/null +++ b/operator/internal/manifests/openshift/rbac_test.go @@ -0,0 +1,64 @@ +package openshift + +import ( + "fmt" + "testing" + + 
"github.com/stretchr/testify/require" + rbacv1 "k8s.io/api/rbac/v1" +) + +func TestBuildRBAC(t *testing.T) { + subjects := []rbacv1.Subject{{ + Kind: "ServiceAccount", + Name: "test-sa", + Namespace: "test-ns", + }} + opts := NewOptionsClusterScope("test", map[string]string{"app": "loki"}, subjects) + + objs := BuildRBAC(opts) + + require.Len(t, objs, 2, "Should only return the ClusterRole and ClusterRoleBinding for the ruler") + + cr, ok := objs[0].(*rbacv1.ClusterRole) + require.True(t, ok, "First object should be a ClusterRole") + crb, ok := objs[1].(*rbacv1.ClusterRoleBinding) + require.True(t, ok, "Second object should be a ClusterRoleBinding") + + require.Equal(t, cr.Name, crb.RoleRef.Name) + require.Equal(t, subjects, crb.Subjects) +} + +func TestLegacyRBAC(t *testing.T) { + // Define test gateway and ruler names + testGatewayName := "test-gateway" + testRulerName := "test-ruler" + expectedGatewayName := fmt.Sprintf("%s-%s", testGatewayName, "authorizer") + expectedRulerName := fmt.Sprintf("%s-%s", testRulerName, "authorizer") + + // Call the function under test + objs := LegacyRBAC(testGatewayName, testRulerName) + + // Verify the number of returned objects + require.Len(t, objs, 4, "Should return exactly 4 objects") + + // Check types and extract objects + gatewayCR, ok := objs[0].(*rbacv1.ClusterRole) + require.True(t, ok, "First object should be a ClusterRole for gateway") + gatewayCRB, ok := objs[1].(*rbacv1.ClusterRoleBinding) + require.True(t, ok, "Second object should be a ClusterRoleBinding for gateway") + rulerCR, ok := objs[2].(*rbacv1.ClusterRole) + require.True(t, ok, "Third object should be a ClusterRole for ruler") + rulerCRB, ok := objs[3].(*rbacv1.ClusterRoleBinding) + require.True(t, ok, "Fourth object should be a ClusterRoleBinding for ruler") + + // Verify gateway we only care about the name since these resources will only + // deleted + require.Equal(t, gatewayCR.Name, gatewayCRB.Name) + require.Equal(t, gatewayCR.Name, 
// ClusterScopeLabels returns the labels that should be assigned to all
// cluster-scoped components managed by the lokistack controller.
func ClusterScopeLabels() map[string]string {
	return map[string]string{
		"app.kubernetes.io/managed-by": "lokistack-controller",
		"app.kubernetes.io/created-by": "lokistack-controller",
	}
}