From 551ed20d1ad85fa17022d9b85e840d83a5b872d0 Mon Sep 17 00:00:00 2001 From: Israel Blancas Date: Wed, 8 Jan 2025 17:51:32 +0100 Subject: [PATCH 1/2] Add automatic RBAC creation for prometheus receiver Signed-off-by: Israel Blancas --- .../3078-add-prometheus-receiver-rbac.yaml | 16 + Makefile | 1 + apis/v1beta1/config.go | 78 ++++- controllers/common.go | 14 +- .../opentelemetrycollector_controller.go | 36 +- .../opentelemetrycollector_controller_test.go | 124 +++++++ internal/components/builder.go | 60 ++-- internal/components/builder_test.go | 6 +- internal/components/component.go | 22 +- internal/components/generic_parser.go | 47 ++- internal/components/generic_parser_test.go | 12 +- internal/components/multi_endpoint.go | 10 +- internal/components/multi_endpoint_test.go | 2 +- internal/components/processors/helpers.go | 10 +- .../components/processors/k8sattribute.go | 2 +- .../processors/k8sattribute_test.go | 6 +- .../processors/resourcedetection.go | 2 +- .../processors/resourcedetection_test.go | 6 +- internal/components/receivers/helpers.go | 14 +- internal/components/receivers/k8sevents.go | 2 +- internal/components/receivers/k8sobjects.go | 2 +- .../components/receivers/k8sobjects_test.go | 2 +- internal/components/receivers/kubeletstats.go | 2 +- .../components/receivers/kubeletstats_test.go | 2 +- internal/components/receivers/prometheus.go | 160 +++++++++ .../components/receivers/prometheus_test.go | 310 ++++++++++++++++++ internal/manifests/builder.go | 8 + internal/manifests/collector/collector.go | 24 +- internal/manifests/collector/rbac.go | 50 ++- internal/manifests/collector/route.go | 12 +- internal/manifests/collector/route_test.go | 12 +- internal/manifests/mutate.go | 6 + internal/naming/main.go | 10 + .../extra-permissions-operator/endpoints.yaml | 11 + .../extra-permissions-operator/rbac.yaml | 2 + .../receiver-prometheus/00-install.yaml | 9 + .../receiver-prometheus/01-assert.yaml | 28 ++ .../receiver-prometheus/01-install.yaml | 25 ++ .../receiver-prometheus/02-assert.yaml | 28 ++ .../receiver-prometheus/02-install.yaml | 25 ++ .../receiver-prometheus/03-assert.yaml | 28 ++ .../receiver-prometheus/03-install.yaml | 25 ++ .../receiver-prometheus/04-assert.yaml | 29 ++ .../receiver-prometheus/04-install.yaml | 25 ++ .../receiver-prometheus/05-assert.yaml | 28 ++ .../receiver-prometheus/05-install.yaml | 25 ++ .../receiver-prometheus/06-delete.yaml | 5 + .../receiver-prometheus/06-error.yaml | 11 + .../receiver-prometheus/chainsaw-test.yaml | 48 +++ 49 files changed, 1317 insertions(+), 105 deletions(-) create mode 100755 .chloggen/3078-add-prometheus-receiver-rbac.yaml create mode 100644 controllers/opentelemetrycollector_controller_test.go create mode 100644 internal/components/receivers/prometheus.go create mode 100644 internal/components/receivers/prometheus_test.go create mode 100644 tests/e2e-automatic-rbac/extra-permissions-operator/endpoints.yaml create mode 100644 tests/e2e-automatic-rbac/receiver-prometheus/00-install.yaml create mode 100644 tests/e2e-automatic-rbac/receiver-prometheus/01-assert.yaml create mode 100644 tests/e2e-automatic-rbac/receiver-prometheus/01-install.yaml create mode 100644 tests/e2e-automatic-rbac/receiver-prometheus/02-assert.yaml create mode 100644 tests/e2e-automatic-rbac/receiver-prometheus/02-install.yaml create mode 100644 tests/e2e-automatic-rbac/receiver-prometheus/03-assert.yaml create mode 100644 tests/e2e-automatic-rbac/receiver-prometheus/03-install.yaml create mode 100644 
tests/e2e-automatic-rbac/receiver-prometheus/04-assert.yaml create mode 100644 tests/e2e-automatic-rbac/receiver-prometheus/04-install.yaml create mode 100644 tests/e2e-automatic-rbac/receiver-prometheus/05-assert.yaml create mode 100644 tests/e2e-automatic-rbac/receiver-prometheus/05-install.yaml create mode 100644 tests/e2e-automatic-rbac/receiver-prometheus/06-delete.yaml create mode 100644 tests/e2e-automatic-rbac/receiver-prometheus/06-error.yaml create mode 100644 tests/e2e-automatic-rbac/receiver-prometheus/chainsaw-test.yaml diff --git a/.chloggen/3078-add-prometheus-receiver-rbac.yaml b/.chloggen/3078-add-prometheus-receiver-rbac.yaml new file mode 100755 index 0000000000..0fd57acd4c --- /dev/null +++ b/.chloggen/3078-add-prometheus-receiver-rbac.yaml @@ -0,0 +1,16 @@ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action) +component: collector + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Create RBAC automatically for the Prometheus receiver + +# One or more tracking issues related to the change +issues: [3078] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: diff --git a/Makefile b/Makefile index 8d5235c9dc..72025fd287 100644 --- a/Makefile +++ b/Makefile @@ -210,6 +210,7 @@ add-rbac-permissions-to-operator: manifests kustomize cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/cronjobs.yaml cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/daemonsets.yaml cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/events.yaml + cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/endpoints.yaml cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/extensions.yaml cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/namespaces.yaml cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/namespaces-status.yaml diff --git a/apis/v1beta1/config.go b/apis/v1beta1/config.go index a7fd65835d..48418800c8 100644 --- a/apis/v1beta1/config.go +++ b/apis/v1beta1/config.go @@ -154,7 +154,7 @@ type Config struct { } // getRbacRulesForComponentKinds gets the RBAC Rules for the given ComponentKind(s). 
-func (c *Config) getRbacRulesForComponentKinds(logger logr.Logger, componentKinds ...ComponentKind) ([]rbacv1.PolicyRule, error) { +func (c *Config) getClusterRoleRbacRulesForComponentKinds(logger logr.Logger, componentKinds ...ComponentKind) ([]rbacv1.PolicyRule, error) { var rules []rbacv1.PolicyRule enabledComponents := c.GetEnabledComponents() for _, componentKind := range componentKinds { @@ -180,7 +180,7 @@ func (c *Config) getRbacRulesForComponentKinds(logger logr.Logger, componentKind for componentName := range enabledComponents[componentKind] { // TODO: Clean up the naming here and make it simpler to use a retriever. parser := retriever(componentName) - if parsedRules, err := parser.GetRBACRules(logger, cfg.Object[componentName]); err != nil { + if parsedRules, err := parser.GetClusterRoleRules(logger, cfg.Object[componentName]); err != nil { return nil, err } else { rules = append(rules, parsedRules...) @@ -190,6 +190,68 @@ func (c *Config) getRbacRulesForComponentKinds(logger logr.Logger, componentKind return rules, nil } +// getRbacRolesForComponentKinds gets the RBAC Roles for the given ComponentKind(s). +func (c *Config) getRbacRolesForComponentKinds(logger logr.Logger, otelCollectorName string, componentKinds ...ComponentKind) ([]*rbacv1.Role, error) { + var roles []*rbacv1.Role + enabledComponents := c.GetEnabledComponents() + for _, componentKind := range componentKinds { + var retriever components.ParserRetriever + var cfg AnyConfig + switch componentKind { + case KindReceiver: + retriever = receivers.ReceiverFor + cfg = c.Receivers + case KindExporter: + continue + case KindProcessor: + continue + case KindExtension: + continue + } + for componentName := range enabledComponents[componentKind] { + // TODO: Clean up the naming here and make it simpler to use a retriever. + parser := retriever(componentName) + if parsedRoles, err := parser.GetRbacRoles(logger, otelCollectorName, cfg.Object[componentName]); err != nil { + return nil, err + } else { + roles = append(roles, parsedRoles...) + } + } + } + return roles, nil +} + +// getRbacRoleBindingsForComponentKinds gets the RBAC RoleBindings for the given ComponentKind(s). +func (c *Config) getRbacRoleBindingsForComponentKinds(logger logr.Logger, serviceAccountName string, otelCollectorName string, otelCollectorNamespace string, componentKinds ...ComponentKind) ([]*rbacv1.RoleBinding, error) { + var roleBindings []*rbacv1.RoleBinding + enabledComponents := c.GetEnabledComponents() + for _, componentKind := range componentKinds { + var retriever components.ParserRetriever + var cfg AnyConfig + switch componentKind { + case KindReceiver: + retriever = receivers.ReceiverFor + cfg = c.Receivers + case KindExporter: + continue + case KindProcessor: + continue + case KindExtension: + continue + } + for componentName := range enabledComponents[componentKind] { + // TODO: Clean up the naming here and make it simpler to use a retriever. + parser := retriever(componentName) + if parsedRoleBindings, err := parser.GetRbacRoleBindings(logger, otelCollectorName, cfg.Object[componentName], serviceAccountName, otelCollectorNamespace); err != nil { + return nil, err + } else { + roleBindings = append(roleBindings, parsedRoleBindings...) + } + } + } + return roleBindings, nil +} + // getPortsForComponentKinds gets the ports for the given ComponentKind(s). 
func (c *Config) getPortsForComponentKinds(logger logr.Logger, componentKinds ...ComponentKind) ([]corev1.ServicePort, error) { var ports []corev1.ServicePort @@ -340,8 +402,16 @@ func (c *Config) GetEnvironmentVariables(logger logr.Logger) ([]corev1.EnvVar, e return c.getEnvironmentVariablesForComponentKinds(logger, KindReceiver) } -func (c *Config) GetAllRbacRules(logger logr.Logger) ([]rbacv1.PolicyRule, error) { - return c.getRbacRulesForComponentKinds(logger, KindReceiver, KindExporter, KindProcessor) +func (c *Config) GetAllClusterRoleRbacRules(logger logr.Logger) ([]rbacv1.PolicyRule, error) { + return c.getClusterRoleRbacRulesForComponentKinds(logger, KindReceiver, KindExporter, KindProcessor) +} + +func (c *Config) GetAllRbacRoles(logger logr.Logger, otelCollectorName string) ([]*rbacv1.Role, error) { + return c.getRbacRolesForComponentKinds(logger, otelCollectorName, KindReceiver, KindExporter, KindProcessor) +} + +func (c *Config) GetAllRbacRoleBindings(logger logr.Logger, serviceAccountName string, otelCollectorName string, otelCollectorNamespace string) ([]*rbacv1.RoleBinding, error) { + return c.getRbacRoleBindingsForComponentKinds(logger, serviceAccountName, otelCollectorName, otelCollectorNamespace, KindReceiver, KindExporter, KindProcessor) } func (c *Config) ApplyDefaults(logger logr.Logger) error { diff --git a/controllers/common.go b/controllers/common.go index 1dbea9da0b..e441ce4a94 100644 --- a/controllers/common.go +++ b/controllers/common.go @@ -158,16 +158,22 @@ func reconcileDesiredObjects(ctx context.Context, kubeClient client.Client, logg "object_kind", desired.GetObjectKind(), ) if isNamespaceScoped(desired) { - if setErr := ctrl.SetControllerReference(owner, desired, scheme); setErr != nil { - l.Error(setErr, "failed to set controller owner reference to desired") - errs = append(errs, setErr) - continue + switch desired.(type) { + case *rbacv1.Role, *rbacv1.RoleBinding: + l.Info("skipping setting controller reference for role or rolebinding") + default: + if setErr := ctrl.SetControllerReference(owner, desired, scheme); setErr != nil { + l.Error(setErr, "failed to set controller owner reference to desired") + errs = append(errs, setErr) + continue + } } } // existing is an object the controller runtime will hydrate for us // we obtain the existing object by deep copying the desired object because it's the most convenient way existing := desired.DeepCopyObject().(client.Object) mutateFn := manifests.MutateFuncFor(existing, desired) + var op controllerutil.OperationResult crudErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { result, createOrUpdateErr := ctrl.CreateOrUpdate(ctx, kubeClient, existing, mutateFn) diff --git a/controllers/opentelemetrycollector_controller.go b/controllers/opentelemetrycollector_controller.go index b447c193a9..ec5172d46d 100644 --- a/controllers/opentelemetrycollector_controller.go +++ b/controllers/opentelemetrycollector_controller.go @@ -60,6 +60,8 @@ var ( ownedClusterObjectTypes = []client.Object{ &rbacv1.ClusterRole{}, &rbacv1.ClusterRoleBinding{}, + &rbacv1.Role{}, + &rbacv1.RoleBinding{}, } ) @@ -91,14 +93,14 @@ func (r *OpenTelemetryCollectorReconciler) findOtelOwnedObjects(ctx context.Cont client.InNamespace(params.OtelCol.Namespace), client.MatchingFields{resourceOwnerKey: params.OtelCol.Name}, } + rbacObjectsFound := false for _, objectType := range ownedObjectTypes { + var objs map[types.UID]client.Object objs, err := getList(ctx, r, objectType, listOpts...) 
if err != nil { return nil, err } - for uid, object := range objs { - ownedObjects[uid] = object - } + // save Collector ConfigMaps into a separate slice, we need to do additional filtering on them switch objectType.(type) { case *corev1.ConfigMap: @@ -110,8 +112,20 @@ func (r *OpenTelemetryCollectorReconciler) findOtelOwnedObjects(ctx context.Cont configMap := object.(*corev1.ConfigMap) collectorConfigMaps = append(collectorConfigMaps, configMap) } + case *rbacv1.ClusterRoleBinding, *rbacv1.ClusterRole, *rbacv1.RoleBinding, *rbacv1.Role: + if params.Config.CreateRBACPermissions() == rbac.Available && !rbacObjectsFound { + objs, err = r.findRBACObjects(ctx, params) + if err != nil { + return nil, err + } + rbacObjectsFound = true + } default: } + + for uid, object := range objs { + ownedObjects[uid] = object + } } // at this point we don't know if the most recent ConfigMap will still be the most recent after reconciliation, or // if a new one will be created. We keep one additional ConfigMap to account for this. The next reconciliation that @@ -125,11 +139,15 @@ func (r *OpenTelemetryCollectorReconciler) findOtelOwnedObjects(ctx context.Cont return ownedObjects, nil } -// The cluster scope objects do not have owner reference. -func (r *OpenTelemetryCollectorReconciler) findClusterRoleObjects(ctx context.Context, params manifests.Params) (map[types.UID]client.Object, error) { +// findRBACObjects finds ClusterRoles, ClusterRoleBindings, Roles, and RoleBindings. +// Those objects do not have owner references. +// - ClusterRoles and ClusterRoleBindings cannot have owner references +// - Roles and RoleBindings can exist in a different namespace than the OpenTelemetryCollector +// +// Users might switch off the RBAC creation feature on the operator which should remove existing RBAC. +func (r *OpenTelemetryCollectorReconciler) findRBACObjects(ctx context.Context, params manifests.Params) (map[types.UID]client.Object, error) { ownedObjects := map[types.UID]client.Object{} - // Remove cluster roles and bindings. - // Users might switch off the RBAC creation feature on the operator which should remove existing RBAC. + listOpsCluster := &client.ListOptions{ LabelSelector: labels.SelectorFromSet( manifestutils.SelectorLabels(params.OtelCol.ObjectMeta, collector.ComponentOpenTelemetryCollector)), @@ -356,6 +374,8 @@ func (r *OpenTelemetryCollectorReconciler) GetOwnedResourceTypes() []client.Obje if r.config.CreateRBACPermissions() == rbac.Available { ownedResources = append(ownedResources, &rbacv1.ClusterRole{}) ownedResources = append(ownedResources, &rbacv1.ClusterRoleBinding{}) + ownedResources = append(ownedResources, &rbacv1.Role{}) + ownedResources = append(ownedResources, &rbacv1.RoleBinding{}) } if featuregate.PrometheusOperatorIsAvailable.IsEnabled() && r.config.PrometheusCRAvailability() == prometheus.Available { @@ -375,7 +395,7 @@ const collectorFinalizer = "opentelemetrycollector.opentelemetry.io/finalizer" func (r *OpenTelemetryCollectorReconciler) finalizeCollector(ctx context.Context, params manifests.Params) error { // The cluster scope objects do not have owner reference. 
They need to be deleted explicitly if params.Config.CreateRBACPermissions() == rbac.Available { - objects, err := r.findClusterRoleObjects(ctx, params) + objects, err := r.findRBACObjects(ctx, params) if err != nil { return err } diff --git a/controllers/opentelemetrycollector_controller_test.go b/controllers/opentelemetrycollector_controller_test.go new file mode 100644 index 0000000000..f9bb6ce268 --- /dev/null +++ b/controllers/opentelemetrycollector_controller_test.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controllers + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + "github.com/open-telemetry/opentelemetry-operator/apis/v1beta1" + "github.com/open-telemetry/opentelemetry-operator/internal/config" +) + +func TestReconcile(t *testing.T) { + logger := zap.New() + ctx := context.Background() + + scheme := runtime.NewScheme() + require.NoError(t, v1beta1.AddToScheme(scheme)) + require.NoError(t, corev1.AddToScheme(scheme)) + + tests := []struct { + name string + existingState []runtime.Object + expectedResult ctrl.Result + expectedError bool + }{ + { + name: "collector not found", + existingState: []runtime.Object{}, + expectedResult: ctrl.Result{}, + expectedError: false, + }, + { + name: "unmanaged collector", + existingState: []runtime.Object{ + &v1beta1.OpenTelemetryCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-collector", + Namespace: "default", + }, + Spec: v1beta1.OpenTelemetryCollectorSpec{ + OpenTelemetryCommonFields: v1beta1.OpenTelemetryCommonFields{ + ManagementState: v1beta1.ManagementStateUnmanaged, + }, + }, + }, + }, + expectedResult: ctrl.Result{}, + expectedError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := fake.NewClientBuilder(). + WithScheme(scheme). + WithRuntimeObjects(tt.existingState...). 
+ Build() + + r := &OpenTelemetryCollectorReconciler{ + Client: client, + log: logger, + scheme: scheme, + config: config.New(), + recorder: record.NewFakeRecorder(100), + } + + result, err := r.Reconcile(ctx, ctrl.Request{}) + + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tt.expectedResult, result) + }) + } +} + +func TestNewReconciler(t *testing.T) { + scheme := runtime.NewScheme() + client := fake.NewClientBuilder().WithScheme(scheme).Build() + recorder := record.NewFakeRecorder(100) + logger := zap.New() + cfg := config.New() + + params := Params{ + Client: client, + Recorder: recorder, + Scheme: scheme, + Log: logger, + Config: cfg, + } + + r := NewReconciler(params) + + assert.Equal(t, client, r.Client) + assert.Equal(t, recorder, r.recorder) + assert.Equal(t, scheme, r.scheme) + assert.Equal(t, logger, r.log) + assert.Equal(t, cfg, r.config) +} diff --git a/internal/components/builder.go b/internal/components/builder.go index a1c9b34bcc..d3fcaa1076 100644 --- a/internal/components/builder.go +++ b/internal/components/builder.go @@ -26,19 +26,21 @@ import ( type ParserOption[ComponentConfigType any] func(*Settings[ComponentConfigType]) type Settings[ComponentConfigType any] struct { - protocol corev1.Protocol - appProtocol *string - targetPort intstr.IntOrString - nodePort int32 - name string - port int32 - defaultRecAddr string - portParser PortParser[ComponentConfigType] - rbacGen RBACRuleGenerator[ComponentConfigType] - livenessGen ProbeGenerator[ComponentConfigType] - readinessGen ProbeGenerator[ComponentConfigType] - defaultsApplier Defaulter[ComponentConfigType] - envVarGen EnvVarGenerator[ComponentConfigType] + protocol corev1.Protocol + appProtocol *string + targetPort intstr.IntOrString + nodePort int32 + name string + port int32 + defaultRecAddr string + portParser PortParser[ComponentConfigType] + clusterRoleRulesGen ClusterRoleRulesGenerator[ComponentConfigType] + roleGen RoleGenerator[ComponentConfigType] + roleBindingGen RoleBindingGenerator[ComponentConfigType] + livenessGen ProbeGenerator[ComponentConfigType] + readinessGen ProbeGenerator[ComponentConfigType] + defaultsApplier Defaulter[ComponentConfigType] + envVarGen EnvVarGenerator[ComponentConfigType] } func NewEmptySettings[ComponentConfigType any]() *Settings[ComponentConfigType] { @@ -108,9 +110,19 @@ func (b Builder[ComponentConfigType]) WithPortParser(portParser PortParser[Compo o.portParser = portParser }) } -func (b Builder[ComponentConfigType]) WithRbacGen(rbacGen RBACRuleGenerator[ComponentConfigType]) Builder[ComponentConfigType] { +func (b Builder[ComponentConfigType]) WithClusterRoleRulesGen(clusterRoleRulesGen ClusterRoleRulesGenerator[ComponentConfigType]) Builder[ComponentConfigType] { return append(b, func(o *Settings[ComponentConfigType]) { - o.rbacGen = rbacGen + o.clusterRoleRulesGen = clusterRoleRulesGen + }) +} +func (b Builder[ComponentConfigType]) WithRoleGen(roleGen RoleGenerator[ComponentConfigType]) Builder[ComponentConfigType] { + return append(b, func(o *Settings[ComponentConfigType]) { + o.roleGen = roleGen + }) +} +func (b Builder[ComponentConfigType]) WithRoleBindingGen(roleBindingGen RoleBindingGenerator[ComponentConfigType]) Builder[ComponentConfigType] { + return append(b, func(o *Settings[ComponentConfigType]) { + o.roleBindingGen = roleBindingGen }) } @@ -143,14 +155,16 @@ func (b Builder[ComponentConfigType]) Build() (*GenericParser[ComponentConfigTyp return nil, fmt.Errorf("invalid settings struct, no name specified") 
} return &GenericParser[ComponentConfigType]{ - name: o.name, - portParser: o.portParser, - rbacGen: o.rbacGen, - envVarGen: o.envVarGen, - livenessGen: o.livenessGen, - readinessGen: o.readinessGen, - defaultsApplier: o.defaultsApplier, - settings: o, + name: o.name, + portParser: o.portParser, + clusterRoleRulesGen: o.clusterRoleRulesGen, + roleGen: o.roleGen, + roleBindingGen: o.roleBindingGen, + envVarGen: o.envVarGen, + livenessGen: o.livenessGen, + readinessGen: o.readinessGen, + defaultsApplier: o.defaultsApplier, + settings: o, }, nil } diff --git a/internal/components/builder_test.go b/internal/components/builder_test.go index 0e82f6bde3..0e4e7d7fd9 100644 --- a/internal/components/builder_test.go +++ b/internal/components/builder_test.go @@ -123,7 +123,7 @@ func TestBuilder_Build(t *testing.T) { WithName("secure-service"). WithPort(443). WithProtocol(corev1.ProtocolTCP). - WithRbacGen(func(logger logr.Logger, config sampleConfig) ([]rbacv1.PolicyRule, error) { + WithClusterRoleRulesGen(func(logger logr.Logger, config sampleConfig) ([]rbacv1.PolicyRule, error) { rules := []rbacv1.PolicyRule{ { NonResourceURLs: []string{config.example}, @@ -174,7 +174,7 @@ func TestBuilder_Build(t *testing.T) { WithName("secure-service"). WithPort(443). WithProtocol(corev1.ProtocolTCP). - WithRbacGen(func(logger logr.Logger, config sampleConfig) ([]rbacv1.PolicyRule, error) { + WithClusterRoleRulesGen(func(logger logr.Logger, config sampleConfig) ([]rbacv1.PolicyRule, error) { rules := []rbacv1.PolicyRule{ { NonResourceURLs: []string{config.example}, @@ -290,7 +290,7 @@ func TestBuilder_Build(t *testing.T) { ports, err := got.Ports(logr.Discard(), got.ParserType(), tt.params.conf) assert.NoError(t, err) assert.Equalf(t, tt.want.ports, ports, "Ports()") - rules, rbacErr := got.GetRBACRules(logr.Discard(), tt.params.conf) + rules, rbacErr := got.GetClusterRoleRules(logr.Discard(), tt.params.conf) if tt.wantRbacErr(t, rbacErr, "WantRbacErr()") && rbacErr != nil { return } diff --git a/internal/components/component.go b/internal/components/component.go index b2341177e8..a82d412363 100644 --- a/internal/components/component.go +++ b/internal/components/component.go @@ -41,9 +41,17 @@ type PortRetriever interface { // PortParser is a function that returns a list of servicePorts given a config of type Config. type PortParser[ComponentConfigType any] func(logger logr.Logger, name string, defaultPort *corev1.ServicePort, config ComponentConfigType) ([]corev1.ServicePort, error) -// RBACRuleGenerator is a function that generates a list of RBAC Rules given a configuration of type Config +// ClusterRoleRulesGenerator is a function that generates a list of RBAC Rules given a configuration of type Config // It's expected that type Config is the configuration used by a parser. -type RBACRuleGenerator[ComponentConfigType any] func(logger logr.Logger, config ComponentConfigType) ([]rbacv1.PolicyRule, error) +type ClusterRoleRulesGenerator[ComponentConfigType any] func(logger logr.Logger, config ComponentConfigType) ([]rbacv1.PolicyRule, error) + +// RoleGenerator is a function that generates a list of RBACRoles given a configuration of type Config +// It's expected that type Config is the configuration used by a parser. 
+type RoleGenerator[ComponentConfigType any] func(logger logr.Logger, config ComponentConfigType, componentName string, otelCollectorName string) ([]*rbacv1.Role, error) + +// RoleBindingGenerator is a function that generates a list of RBACRoleBindings given a configuration of type Config +// It's expected that type Config is the configuration used by a parser. +type RoleBindingGenerator[ComponentConfigType any] func(logger logr.Logger, config ComponentConfigType, componentName string, serviceAccountName string, otelCollectorName string, otelCollectorNamespace string) ([]*rbacv1.RoleBinding, error) // ProbeGenerator is a function that generates a valid probe for a container given Config // It's expected that type Config is the configuration used by a parser. @@ -103,8 +111,14 @@ type Parser interface { // of the form "name" or "type/name" Ports(logger logr.Logger, name string, config interface{}) ([]corev1.ServicePort, error) - // GetRBACRules returns the rbac rules for this component - GetRBACRules(logger logr.Logger, config interface{}) ([]rbacv1.PolicyRule, error) + // GetClusterRoleRules returns the rbac rules for this component + GetClusterRoleRules(logger logr.Logger, config interface{}) ([]rbacv1.PolicyRule, error) + + // GetRbacRoleBindings returns the rbac role bindings for this component + GetRbacRoleBindings(logger logr.Logger, otelCollectorName string, config interface{}, serviceAccountName string, otelCollectorNamespace string) ([]*rbacv1.RoleBinding, error) + + // GetRbacRoles returns the rbac roles for this component + GetRbacRoles(logger logr.Logger, otelCollectorName string, config interface{}) ([]*rbacv1.Role, error) // GetLivenessProbe returns a liveness probe set for the collector GetLivenessProbe(logger logr.Logger, config interface{}) (*corev1.Probe, error) diff --git a/internal/components/generic_parser.go b/internal/components/generic_parser.go index a3a40e819d..663a98349e 100644 --- a/internal/components/generic_parser.go +++ b/internal/components/generic_parser.go @@ -30,14 +30,16 @@ var ( // GenericParser serves as scaffolding for custom parsing logic by isolating // functionality to idempotent functions. 
type GenericParser[T any] struct { - name string - settings *Settings[T] - portParser PortParser[T] - rbacGen RBACRuleGenerator[T] - envVarGen EnvVarGenerator[T] - livenessGen ProbeGenerator[T] - readinessGen ProbeGenerator[T] - defaultsApplier Defaulter[T] + name string + settings *Settings[T] + portParser PortParser[T] + clusterRoleRulesGen ClusterRoleRulesGenerator[T] + roleGen RoleGenerator[T] + roleBindingGen RoleBindingGenerator[T] + envVarGen EnvVarGenerator[T] + livenessGen ProbeGenerator[T] + readinessGen ProbeGenerator[T] + defaultsApplier Defaulter[T] } func (g *GenericParser[T]) GetDefaultConfig(logger logr.Logger, config interface{}) (interface{}, error) { @@ -78,15 +80,38 @@ func (g *GenericParser[T]) GetReadinessProbe(logger logr.Logger, config interfac return g.readinessGen(logger, parsed) } -func (g *GenericParser[T]) GetRBACRules(logger logr.Logger, config interface{}) ([]rbacv1.PolicyRule, error) { - if g.rbacGen == nil { +func (g *GenericParser[T]) GetClusterRoleRules(logger logr.Logger, config interface{}) ([]rbacv1.PolicyRule, error) { + if g.clusterRoleRulesGen == nil { return nil, nil } var parsed T if err := mapstructure.Decode(config, &parsed); err != nil { return nil, err } - return g.rbacGen(logger, parsed) + return g.clusterRoleRulesGen(logger, parsed) +} + +func (g *GenericParser[T]) GetRbacRoles(logger logr.Logger, otelCollectorName string, config interface{}) ([]*rbacv1.Role, error) { + if g.roleGen == nil { + return nil, nil + } + var parsed T + if err := mapstructure.Decode(config, &parsed); err != nil { + return nil, err + } + return g.roleGen(logger, parsed, g.name, otelCollectorName) +} + +func (g *GenericParser[T]) GetRbacRoleBindings(logger logr.Logger, otelCollectorName string, config interface{}, serviceAccountName string, otelCollectorNamespace string) ([]*rbacv1.RoleBinding, error) { + if g.roleBindingGen == nil { + return nil, nil + } + var parsed T + if err := mapstructure.Decode(config, &parsed); err != nil { + return nil, err + } + + return g.roleBindingGen(logger, parsed, g.name, serviceAccountName, otelCollectorName, otelCollectorNamespace) } func (g *GenericParser[T]) GetEnvironmentVariables(logger logr.Logger, config interface{}) ([]corev1.EnvVar, error) { diff --git a/internal/components/generic_parser_test.go b/internal/components/generic_parser_test.go index 0c1a2a65b0..312d4daa5b 100644 --- a/internal/components/generic_parser_test.go +++ b/internal/components/generic_parser_test.go @@ -145,7 +145,7 @@ func TestGenericParser_GetRBACRules(t *testing.T) { tests := []testCase[*components.SingleEndpointConfig]{ { name: "valid config with endpoint", - g: components.NewSinglePortParserBuilder("test", 0).WithRbacGen(rbacGenFunc).MustBuild(), + g: components.NewSinglePortParserBuilder("test", 0).WithClusterRoleRulesGen(rbacGenFunc).MustBuild(), args: args{ logger: logr.Discard(), config: map[string]interface{}{ @@ -163,7 +163,7 @@ func TestGenericParser_GetRBACRules(t *testing.T) { }, { name: "valid config with listen_address", - g: components.NewSinglePortParserBuilder("test", 0).WithRbacGen(rbacGenFunc).MustBuild(), + g: components.NewSinglePortParserBuilder("test", 0).WithClusterRoleRulesGen(rbacGenFunc).MustBuild(), args: args{ logger: logr.Discard(), config: map[string]interface{}{ @@ -181,7 +181,7 @@ func TestGenericParser_GetRBACRules(t *testing.T) { }, { name: "invalid config with no endpoint or listen_address", - g: components.NewSinglePortParserBuilder("test", 0).WithRbacGen(rbacGenFunc).MustBuild(), + g: 
components.NewSinglePortParserBuilder("test", 0).WithClusterRoleRulesGen(rbacGenFunc).MustBuild(), args: args{ logger: logr.Discard(), config: map[string]interface{}{}, @@ -201,7 +201,7 @@ func TestGenericParser_GetRBACRules(t *testing.T) { }, { name: "failed to parse config", - g: components.NewSinglePortParserBuilder("test", 0).WithRbacGen(rbacGenFunc).MustBuild(), + g: components.NewSinglePortParserBuilder("test", 0).WithClusterRoleRulesGen(rbacGenFunc).MustBuild(), args: args{ logger: logr.Discard(), config: func() {}, @@ -213,8 +213,8 @@ func TestGenericParser_GetRBACRules(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := tt.g.GetRBACRules(tt.args.logger, tt.args.config) - if !tt.wantErr(t, err, fmt.Sprintf("GetRBACRules(%v, %v)", tt.args.logger, tt.args.config)) { + got, err := tt.g.GetClusterRoleRules(tt.args.logger, tt.args.config) + if !tt.wantErr(t, err, fmt.Sprintf("GetClusterRoleRules(%v, %v)", tt.args.logger, tt.args.config)) { return } assert.Equalf(t, tt.want, got, "GetRBACRules(%v, %v)", tt.args.logger, tt.args.config) diff --git a/internal/components/multi_endpoint.go b/internal/components/multi_endpoint.go index 9c7019cb6d..5fbdc997e3 100644 --- a/internal/components/multi_endpoint.go +++ b/internal/components/multi_endpoint.go @@ -112,7 +112,15 @@ func (m *MultiPortReceiver) GetReadinessProbe(logger logr.Logger, config interfa return nil, nil } -func (m *MultiPortReceiver) GetRBACRules(logr.Logger, interface{}) ([]rbacv1.PolicyRule, error) { +func (m *MultiPortReceiver) GetClusterRoleRules(logger logr.Logger, config interface{}) ([]rbacv1.PolicyRule, error) { + return nil, nil +} + +func (m *MultiPortReceiver) GetRbacRoleBindings(logger logr.Logger, otelCollectorName string, config interface{}, serviceAccountName string, otelCollectorNamespace string) ([]*rbacv1.RoleBinding, error) { + return nil, nil +} + +func (m *MultiPortReceiver) GetRbacRoles(logger logr.Logger, otelCollectorName string, config interface{}) ([]*rbacv1.Role, error) { return nil, nil } diff --git a/internal/components/multi_endpoint_test.go b/internal/components/multi_endpoint_test.go index 645e2b394c..3ef73bc7c1 100644 --- a/internal/components/multi_endpoint_test.go +++ b/internal/components/multi_endpoint_test.go @@ -355,7 +355,7 @@ func TestMultiPortReceiver_Ports(t *testing.T) { return } assert.ElementsMatchf(t, tt.want, got, "Ports(%v)", tt.args.config) - rbacGen, err := s.GetRBACRules(logr.Discard(), tt.args.config) + rbacGen, err := s.GetClusterRoleRules(logr.Discard(), tt.args.config) assert.NoError(t, err) assert.Nil(t, rbacGen) livenessProbe, livenessErr := s.GetLivenessProbe(logr.Discard(), tt.args.config) diff --git a/internal/components/processors/helpers.go b/internal/components/processors/helpers.go index ab1277b186..6c210fba5c 100644 --- a/internal/components/processors/helpers.go +++ b/internal/components/processors/helpers.go @@ -39,8 +39,14 @@ func ProcessorFor(name string) components.Parser { } var componentParsers = []components.Parser{ - components.NewBuilder[K8sAttributeConfig]().WithName("k8sattributes").WithRbacGen(GenerateK8SAttrRbacRules).MustBuild(), - components.NewBuilder[ResourceDetectionConfig]().WithName("resourcedetection").WithRbacGen(GenerateResourceDetectionRbacRules).MustBuild(), + components.NewBuilder[K8sAttributeConfig](). + WithName("k8sattributes"). + WithClusterRoleRulesGen(generateK8SAttrClusterRoleRules). + MustBuild(), + components.NewBuilder[ResourceDetectionConfig](). + WithName("resourcedetection"). 
+ WithClusterRoleRulesGen(generateResourceDetectionClusterRoleRules). + MustBuild(), } func init() { diff --git a/internal/components/processors/k8sattribute.go b/internal/components/processors/k8sattribute.go index f9d5266c60..ee0b106727 100644 --- a/internal/components/processors/k8sattribute.go +++ b/internal/components/processors/k8sattribute.go @@ -42,7 +42,7 @@ type K8sAttributeConfig struct { Extract Extract `mapstructure:"extract"` } -func GenerateK8SAttrRbacRules(_ logr.Logger, config K8sAttributeConfig) ([]rbacv1.PolicyRule, error) { +func generateK8SAttrClusterRoleRules(_ logr.Logger, config K8sAttributeConfig) ([]rbacv1.PolicyRule, error) { // These policies need to be added always var prs = []rbacv1.PolicyRule{ { diff --git a/internal/components/processors/k8sattribute_test.go b/internal/components/processors/k8sattribute_test.go index 604656f93a..4f2e52745c 100644 --- a/internal/components/processors/k8sattribute_test.go +++ b/internal/components/processors/k8sattribute_test.go @@ -141,11 +141,11 @@ func TestGenerateK8SAttrRbacRules(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := processors.ProcessorFor("k8sattributes") - got, err := parser.GetRBACRules(logger, tt.args.config) - if !tt.wantErr(t, err, fmt.Sprintf("GetRBACRules(%v)", tt.args.config)) { + got, err := parser.GetClusterRoleRules(logger, tt.args.config) + if !tt.wantErr(t, err, fmt.Sprintf("GetClusterRoleRules(%v)", tt.args.config)) { return } - assert.Equalf(t, tt.want, got, "GetRBACRules(%v)", tt.args.config) + assert.Equalf(t, tt.want, got, "GetClusterRoleRules(%v)", tt.args.config) }) } } diff --git a/internal/components/processors/resourcedetection.go b/internal/components/processors/resourcedetection.go index 5d5af17c11..6cbe3acb43 100644 --- a/internal/components/processors/resourcedetection.go +++ b/internal/components/processors/resourcedetection.go @@ -27,7 +27,7 @@ type ResourceDetectionConfig struct { Detectors []string `mapstructure:"detectors"` } -func GenerateResourceDetectionRbacRules(_ logr.Logger, config ResourceDetectionConfig) ([]rbacv1.PolicyRule, error) { +func generateResourceDetectionClusterRoleRules(_ logr.Logger, config ResourceDetectionConfig) ([]rbacv1.PolicyRule, error) { var prs []rbacv1.PolicyRule for _, d := range config.Detectors { detectorName := fmt.Sprint(d) diff --git a/internal/components/processors/resourcedetection_test.go b/internal/components/processors/resourcedetection_test.go index 4d6b448d40..172d24e93a 100644 --- a/internal/components/processors/resourcedetection_test.go +++ b/internal/components/processors/resourcedetection_test.go @@ -111,11 +111,11 @@ func TestGenerateResourceDetectionRbacRules(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := processors.ProcessorFor("resourcedetection") - got, err := parser.GetRBACRules(logger, tt.args.config) - if !tt.wantErr(t, err, fmt.Sprintf("GetRBACRules(%v)", tt.args.config)) { + got, err := parser.GetClusterRoleRules(logger, tt.args.config) + if !tt.wantErr(t, err, fmt.Sprintf("GetClusterRoleRules(%v)", tt.args.config)) { return } - assert.Equalf(t, tt.want, got, "GetRBACRules(%v)", tt.args.config) + assert.Equalf(t, tt.want, got, "GetClusterRoleRules(%v)", tt.args.config) }) } } diff --git a/internal/components/receivers/helpers.go b/internal/components/receivers/helpers.go index 43ebaa0d06..24613e3b0a 100644 --- a/internal/components/receivers/helpers.go +++ b/internal/components/receivers/helpers.go @@ -137,19 +137,23 @@ var ( 
WithTargetPort(3100). MustBuild(), components.NewBuilder[kubeletStatsConfig]().WithName("kubeletstats"). - WithRbacGen(generateKubeletStatsRbacRules). + WithClusterRoleRulesGen(generateKubeletStatsClusterRoleRules). WithEnvVarGen(generateKubeletStatsEnvVars). MustBuild(), components.NewBuilder[k8seventsConfig]().WithName("k8s_events"). - WithRbacGen(generatek8seventsRbacRules). + WithClusterRoleRulesGen(generatek8seventsClusterRoleRules). MustBuild(), components.NewBuilder[k8sclusterConfig]().WithName("k8s_cluster"). - WithRbacGen(generatek8sclusterRbacRules). + WithClusterRoleRulesGen(generatek8sclusterRbacRules). MustBuild(), components.NewBuilder[k8sobjectsConfig]().WithName("k8sobjects"). - WithRbacGen(generatek8sobjectsRbacRules). + WithClusterRoleRulesGen(generatek8sobjectsClusterRoleRules). + MustBuild(), + components.NewBuilder[prometheusReceiverConfig]().WithName("prometheus"). + WithPort(components.UnsetPort). + WithRoleGen(generatePrometheusReceiverRoles). + WithRoleBindingGen(generatePrometheusReceiverRoleBindings). MustBuild(), - NewScraperParser("prometheus"), NewScraperParser("sshcheck"), NewScraperParser("cloudfoundry"), NewScraperParser("vcenter"), diff --git a/internal/components/receivers/k8sevents.go b/internal/components/receivers/k8sevents.go index e9d6d45a88..38acca61e4 100644 --- a/internal/components/receivers/k8sevents.go +++ b/internal/components/receivers/k8sevents.go @@ -21,7 +21,7 @@ import ( type k8seventsConfig struct{} -func generatek8seventsRbacRules(_ logr.Logger, _ k8seventsConfig) ([]rbacv1.PolicyRule, error) { +func generatek8seventsClusterRoleRules(_ logr.Logger, _ k8seventsConfig) ([]rbacv1.PolicyRule, error) { // The k8s Events Receiver needs get permissions on the following resources always. return []rbacv1.PolicyRule{ { diff --git a/internal/components/receivers/k8sobjects.go b/internal/components/receivers/k8sobjects.go index 10505ad35c..dfcdeab17e 100644 --- a/internal/components/receivers/k8sobjects.go +++ b/internal/components/receivers/k8sobjects.go @@ -29,7 +29,7 @@ type k8sObject struct { Group string `yaml:"group,omitempty"` } -func generatek8sobjectsRbacRules(_ logr.Logger, config k8sobjectsConfig) ([]rbacv1.PolicyRule, error) { +func generatek8sobjectsClusterRoleRules(_ logr.Logger, config k8sobjectsConfig) ([]rbacv1.PolicyRule, error) { // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8sobjectsreceiver#rbac prs := []rbacv1.PolicyRule{} for _, obj := range config.Objects { diff --git a/internal/components/receivers/k8sobjects_test.go b/internal/components/receivers/k8sobjects_test.go index 647882f572..d20cd3fd76 100644 --- a/internal/components/receivers/k8sobjects_test.go +++ b/internal/components/receivers/k8sobjects_test.go @@ -128,7 +128,7 @@ func Test_generatek8sobjectsRbacRules(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := generatek8sobjectsRbacRules(logr.Logger{}, tt.config) + got, err := generatek8sobjectsClusterRoleRules(logr.Logger{}, tt.config) assert.NoError(t, err) assert.Equal(t, tt.want, got) }) diff --git a/internal/components/receivers/kubeletstats.go b/internal/components/receivers/kubeletstats.go index 43f2be8697..02ca6bc03d 100644 --- a/internal/components/receivers/kubeletstats.go +++ b/internal/components/receivers/kubeletstats.go @@ -52,7 +52,7 @@ func generateKubeletStatsEnvVars(_ logr.Logger, config kubeletStatsConfig) ([]co }, nil } -func generateKubeletStatsRbacRules(_ logr.Logger, config kubeletStatsConfig) 
([]rbacv1.PolicyRule, error) { +func generateKubeletStatsClusterRoleRules(_ logr.Logger, config kubeletStatsConfig) ([]rbacv1.PolicyRule, error) { // The Kubelet Stats Receiver needs get permissions on the nodes/stats resources always. prs := []rbacv1.PolicyRule{ { diff --git a/internal/components/receivers/kubeletstats_test.go b/internal/components/receivers/kubeletstats_test.go index 246aec5dee..c968dd1df0 100644 --- a/internal/components/receivers/kubeletstats_test.go +++ b/internal/components/receivers/kubeletstats_test.go @@ -85,7 +85,7 @@ func TestGenerateKubeletStatsRbacRules(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - rules, err := generateKubeletStatsRbacRules(logr.Logger{}, tt.config) + rules, err := generateKubeletStatsClusterRoleRules(logr.Logger{}, tt.config) if tt.expectedErrMsg != "" { require.Error(t, err) diff --git a/internal/components/receivers/prometheus.go b/internal/components/receivers/prometheus.go new file mode 100644 index 0000000000..da696407ff --- /dev/null +++ b/internal/components/receivers/prometheus.go @@ -0,0 +1,160 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package receivers + +import ( + "github.com/go-logr/logr" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/open-telemetry/opentelemetry-operator/internal/naming" +) + +type namespaces struct { + Names []string `mapstructure:"names"` +} + +type kubernetesSDConfig struct { + Namespaces namespaces `mapstructure:"namespaces"` + Role string `mapstructure:"role"` +} + +type scrapeConfig struct { + KubernetesSDConfigs []kubernetesSDConfig `mapstructure:"kubernetes_sd_configs"` + JobName string `mapstructure:"job_name"` +} + +type prometheusConfig struct { + ScrapeConfigs *[]scrapeConfig `mapstructure:"scrape_configs"` +} + +type prometheusReceiverConfig struct { + Config *prometheusConfig `mapstructure:"config"` +} + +func generatePrometheusReceiverRoles(logger logr.Logger, config prometheusReceiverConfig, componentName string, otelCollectorName string) ([]*rbacv1.Role, error) { + if config.Config == nil { + return nil, nil + } + + if config.Config.ScrapeConfigs == nil { + return nil, nil + } + + var roles []*rbacv1.Role + + for _, scrapeConfig := range *config.Config.ScrapeConfigs { + for _, kubernetesSDConfig := range scrapeConfig.KubernetesSDConfigs { + var rule rbacv1.PolicyRule + switch kubernetesSDConfig.Role { + case "pod": + rule = rbacv1.PolicyRule{ + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{"get", "watch", "list"}, + } + case "node": + rule = rbacv1.PolicyRule{ + APIGroups: []string{""}, + Resources: []string{"nodes"}, + Verbs: []string{"get", "watch", "list"}, + } + case "service": + rule = rbacv1.PolicyRule{ + APIGroups: []string{""}, + Resources: []string{"services"}, + Verbs: []string{"get", "watch", "list"}, + } + case "endpoints": + rule = rbacv1.PolicyRule{ + APIGroups: []string{""}, + Resources: []string{"endpoints", 
"services"}, + Verbs: []string{"get", "watch", "list"}, + } + case "ingress": + rule = rbacv1.PolicyRule{ + APIGroups: []string{"networking.k8s.io"}, + Resources: []string{"ingresses"}, + Verbs: []string{"get", "watch", "list"}, + } + default: + logger.Info("unsupported role used for prometheus receiver", "role", kubernetesSDConfig.Role) + continue + } + + for _, namespace := range kubernetesSDConfig.Namespaces.Names { + // We need to create a role for each namespace and role + roles = append(roles, &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: getRoleName(scrapeConfig.JobName, componentName, otelCollectorName), + Namespace: namespace, + }, + Rules: []rbacv1.PolicyRule{rule}, + }) + } + } + } + return roles, nil +} + +func generatePrometheusReceiverRoleBindings(logger logr.Logger, config prometheusReceiverConfig, componentName string, serviceAccountName string, otelCollectorName string, otelCollectorNamespace string) ([]*rbacv1.RoleBinding, error) { + if config.Config == nil { + return nil, nil + } + + if config.Config.ScrapeConfigs == nil { + return nil, nil + } + + var roleBindings []*rbacv1.RoleBinding + + for _, scrapeConfig := range *config.Config.ScrapeConfigs { + for _, kubernetesSDConfig := range scrapeConfig.KubernetesSDConfigs { + for _, namespace := range kubernetesSDConfig.Namespaces.Names { + + rb := rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: getRoleBindingName(scrapeConfig.JobName, componentName, otelCollectorName), + Namespace: namespace, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: serviceAccountName, + Namespace: otelCollectorNamespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: getRoleName(scrapeConfig.JobName, componentName, otelCollectorName), + }, + } + + roleBindings = append(roleBindings, &rb) + } + } + } + + return roleBindings, nil +} + +func getRoleName(jobName string, componentName string, otelCollectorName string) string { + return naming.Role(otelCollectorName, jobName+"-"+componentName) +} + +func getRoleBindingName(jobName string, componentName string, otelCollectorName string) string { + return naming.RoleBinding(otelCollectorName, jobName+"-"+componentName) +} diff --git a/internal/components/receivers/prometheus_test.go b/internal/components/receivers/prometheus_test.go new file mode 100644 index 0000000000..da8af756f7 --- /dev/null +++ b/internal/components/receivers/prometheus_test.go @@ -0,0 +1,310 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package receivers + +import ( + "testing" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/testr" + "github.com/stretchr/testify/assert" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestGeneratePrometheusReceiverRoles(t *testing.T) { + tests := []struct { + name string + config prometheusReceiverConfig + componentName string + want []*rbacv1.Role + }{ + { + name: "nil config", + config: prometheusReceiverConfig{ + Config: nil, + }, + componentName: "component", + want: nil, + }, + { + name: "nil scrape configs", + config: prometheusReceiverConfig{ + Config: &prometheusConfig{ + ScrapeConfigs: nil, + }, + }, + componentName: "component", + want: nil, + }, + { + name: "single pod role with multiple namespaces", + config: prometheusReceiverConfig{ + Config: &prometheusConfig{ + ScrapeConfigs: &[]scrapeConfig{ + { + JobName: "job", + KubernetesSDConfigs: []kubernetesSDConfig{ + { + Role: "pod", + Namespaces: namespaces{ + Names: []string{"ns1", "ns2"}, + }, + }, + }, + }, + }, + }, + }, + componentName: "component", + want: []*rbacv1.Role{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job-component-role", + Namespace: "ns1", + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{"get", "watch", "list"}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job-component-role", + Namespace: "ns2", + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{"get", "watch", "list"}, + }, + }, + }, + }, + }, + { + name: "multiple roles and namespaces", + config: prometheusReceiverConfig{ + Config: &prometheusConfig{ + ScrapeConfigs: &[]scrapeConfig{ + { + JobName: "job", + KubernetesSDConfigs: []kubernetesSDConfig{ + { + Role: "pod", + Namespaces: namespaces{ + Names: []string{"ns1"}, + }, + }, + { + Role: "service", + Namespaces: namespaces{ + Names: []string{"ns2"}, + }, + }, + }, + }, + }, + }, + }, + componentName: "component", + want: []*rbacv1.Role{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job-component-role", + Namespace: "ns1", + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{"get", "watch", "list"}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job-component-role", + Namespace: "ns2", + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"services"}, + Verbs: []string{"get", "watch", "list"}, + }, + }, + }, + }, + }, + { + name: "unsupported role type", + config: prometheusReceiverConfig{ + Config: &prometheusConfig{ + ScrapeConfigs: &[]scrapeConfig{ + { + JobName: "job", + KubernetesSDConfigs: []kubernetesSDConfig{ + { + Role: "unsupported", + Namespaces: namespaces{ + Names: []string{"ns1"}, + }, + }, + }, + }, + }, + }, + }, + componentName: "component", + want: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + logger := testr.New(t) + got, err := generatePrometheusReceiverRoles(logger, tt.config, tt.componentName, "test") + assert.NoError(t, err) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestGeneratePrometheusReceiverRoleBindings(t *testing.T) { + namespace := "test-ns" + tests := []struct { + name string + config prometheusReceiverConfig + componentName string + serviceAccountName string + want int // number of expected role bindings + }{ + { + name: "nil config", + config: prometheusReceiverConfig{ + Config: nil, + }, + 
componentName: "test-component", + serviceAccountName: "test-sa", + want: 0, + }, + { + name: "nil scrape configs", + config: prometheusReceiverConfig{ + Config: &prometheusConfig{ + ScrapeConfigs: nil, + }, + }, + componentName: "test-component", + serviceAccountName: "test-sa", + want: 0, + }, + { + name: "single namespace and job", + config: prometheusReceiverConfig{ + Config: &prometheusConfig{ + ScrapeConfigs: &[]scrapeConfig{ + { + JobName: "test-job", + KubernetesSDConfigs: []kubernetesSDConfig{ + { + Role: "pod", + Namespaces: namespaces{ + Names: []string{"test-ns"}, + }, + }, + }, + }, + }, + }, + }, + componentName: "test-component", + serviceAccountName: "test-sa", + want: 1, + }, + { + name: "multiple namespaces and jobs", + config: prometheusReceiverConfig{ + Config: &prometheusConfig{ + ScrapeConfigs: &[]scrapeConfig{ + { + JobName: "test-job-1", + KubernetesSDConfigs: []kubernetesSDConfig{ + { + Role: "pod", + Namespaces: namespaces{ + Names: []string{"test-ns-1", "test-ns-2"}, + }, + }, + }, + }, + { + JobName: "test-job-2", + KubernetesSDConfigs: []kubernetesSDConfig{ + { + Role: "service", + Namespaces: namespaces{ + Names: []string{"test-ns-3"}, + }, + }, + }, + }, + }, + }, + }, + componentName: "test-component", + serviceAccountName: "test-sa", + want: 3, + }, + } + + logger := logr.Discard() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := generatePrometheusReceiverRoleBindings(logger, tt.config, tt.componentName, tt.serviceAccountName, "test", namespace) + if err != nil { + t.Errorf("generatePrometheusReceiverRoleBindings() error = %v", err) + return + } + + if len(got) != tt.want { + t.Errorf("generatePrometheusReceiverRoleBindings() got %d role bindings, want %d", len(got), tt.want) + } + + // For non-empty results, verify the role binding properties + if len(got) > 0 { + rb := got[0] + if rb.Subjects[0].Name != tt.serviceAccountName { + t.Errorf("Role binding has wrong service account name, got %s, want %s", + rb.Subjects[0].Name, tt.serviceAccountName) + } + if rb.Subjects[0].Kind != rbacv1.ServiceAccountKind { + t.Errorf("Role binding has wrong subject kind, got %s, want %s", + rb.Subjects[0].Kind, rbacv1.ServiceAccountKind) + } + if rb.RoleRef.Kind != "Role" { + t.Errorf("Role binding has wrong role ref kind, got %s, want Role", + rb.RoleRef.Kind) + } + } + }) + } +} diff --git a/internal/manifests/builder.go b/internal/manifests/builder.go index e1c42df7d5..58dc486821 100644 --- a/internal/manifests/builder.go +++ b/internal/manifests/builder.go @@ -23,8 +23,10 @@ import ( type Builder[Params any] func(params Params) ([]client.Object, error) type ManifestFactory[T client.Object, Params any] func(params Params) (T, error) +type ManifestSliceFactory[T ~[]client.Object, Params any] func(params Params) (T, error) type SimpleManifestFactory[T client.Object, Params any] func(params Params) T type K8sManifestFactory[Params any] ManifestFactory[client.Object, Params] +type K8sManifestSliceFactory[Params any] ManifestSliceFactory[[]client.Object, Params] func FactoryWithoutError[T client.Object, Params any](f SimpleManifestFactory[T, Params]) K8sManifestFactory[Params] { return func(params Params) (client.Object, error) { @@ -38,6 +40,12 @@ func Factory[T client.Object, Params any](f ManifestFactory[T, Params]) K8sManif } } +func FactorySlice[T []client.Object, Params any](f ManifestSliceFactory[T, Params]) K8sManifestSliceFactory[Params] { + return func(params Params) ([]client.Object, error) { + return f(params) + } +} + // 
ObjectIsNotNil ensures that we only create an object IFF it isn't nil, // and it's concrete type isn't nil either. This works around the Go type system // by using reflection to verify its concrete type isn't nil. diff --git a/internal/manifests/collector/collector.go b/internal/manifests/collector/collector.go index 0e4cc414d5..409f612eb0 100644 --- a/internal/manifests/collector/collector.go +++ b/internal/manifests/collector/collector.go @@ -34,6 +34,7 @@ const ( func Build(params manifests.Params) ([]client.Object, error) { var resourceManifests []client.Object var manifestFactories []manifests.K8sManifestFactory[manifests.Params] + var manifestSliceFactories []manifests.K8sManifestSliceFactory[manifests.Params] switch params.OtelCol.Spec.Mode { case v1beta1.ModeDeployment: manifestFactories = append(manifestFactories, manifests.Factory(Deployment)) @@ -85,6 +86,21 @@ func Build(params manifests.Params) ([]client.Object, error) { } } + manifestSliceFactories = append( + manifestSliceFactories, + manifests.FactorySlice(Role), + manifests.FactorySlice(RoleBinding), + manifests.FactorySlice(Routes), + ) + + for _, factory := range manifestSliceFactories { + objs, err := factory(params) + if err != nil { + return nil, err + } + resourceManifests = append(resourceManifests, objs...) + } + if needsCheckSaPermissions(params) { warnings, err := CheckRbacRules(params, params.OtelCol.Spec.ServiceAccount) if err != nil { @@ -98,14 +114,6 @@ func Build(params manifests.Params) ([]client.Object, error) { return nil, errors.Join(w...) } - routes, err := Routes(params) - if err != nil { - return nil, err - } - // NOTE: we cannot just unpack the slice, the type checker doesn't coerce the type correctly. - for _, route := range routes { - resourceManifests = append(resourceManifests, route) - } return resourceManifests, nil } diff --git a/internal/manifests/collector/rbac.go b/internal/manifests/collector/rbac.go index 9ae0a65f1f..6649054487 100644 --- a/internal/manifests/collector/rbac.go +++ b/internal/manifests/collector/rbac.go @@ -20,6 +20,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/open-telemetry/opentelemetry-operator/internal/manifests" "github.com/open-telemetry/opentelemetry-operator/internal/manifests/manifestutils" @@ -28,7 +29,7 @@ import ( ) func ClusterRole(params manifests.Params) (*rbacv1.ClusterRole, error) { - rules, err := params.OtelCol.Spec.Config.GetAllRbacRules(params.Log) + rules, err := params.OtelCol.Spec.Config.GetAllClusterRoleRbacRules(params.Log) if err != nil { return nil, err } else if len(rules) == 0 { @@ -54,7 +55,7 @@ func ClusterRole(params manifests.Params) (*rbacv1.ClusterRole, error) { } func ClusterRoleBinding(params manifests.Params) (*rbacv1.ClusterRoleBinding, error) { - rules, err := params.OtelCol.Spec.Config.GetAllRbacRules(params.Log) + rules, err := params.OtelCol.Spec.Config.GetAllClusterRoleRbacRules(params.Log) if err != nil { return nil, err } else if len(rules) == 0 { @@ -90,14 +91,55 @@ func ClusterRoleBinding(params manifests.Params) (*rbacv1.ClusterRoleBinding, er }, nil } +func Role(params manifests.Params) ([]client.Object, error) { + roles, err := params.OtelCol.Spec.Config.GetAllRbacRoles(params.Log, params.OtelCol.Name) + if err != nil { + return nil, err + } + + // Convert []*rbacv1.Role to []client.Object + result := make([]client.Object, len(roles)) + for i, role := range roles { + result[i] = role + } + + return result, nil +} + +func 
RoleBinding(params manifests.Params) ([]client.Object, error) { + rbs, err := params.OtelCol.Spec.Config.GetAllRbacRoleBindings(params.Log, ServiceAccountName(params.OtelCol), params.OtelCol.Name, params.OtelCol.Namespace) + if err != nil { + return nil, err + } else if len(rbs) == 0 { + return nil, nil + } + + annotations, err := manifestutils.Annotations(params.OtelCol, params.Config.AnnotationsFilter()) + if err != nil { + return nil, err + } + + for _, rb := range rbs { + rb.ObjectMeta.Labels = manifestutils.Labels(params.OtelCol.ObjectMeta, rb.ObjectMeta.Name, params.OtelCol.Spec.Image, ComponentOpenTelemetryCollector, params.Config.LabelsFilter()) + rb.ObjectMeta.Annotations = annotations + } + + // Convert []*rbacv1.RoleBinding to []client.Object + result := make([]client.Object, len(rbs)) + for i, role := range rbs { + result[i] = role + } + + return result, nil +} + func CheckRbacRules(params manifests.Params, saName string) ([]string, error) { ctx := context.Background() - rules, err := params.OtelCol.Spec.Config.GetAllRbacRules(params.Log) + rules, err := params.OtelCol.Spec.Config.GetAllClusterRoleRbacRules(params.Log) if err != nil { return nil, err } - r := []*rbacv1.PolicyRule{} for _, rule := range rules { diff --git a/internal/manifests/collector/route.go b/internal/manifests/collector/route.go index 29fa385c38..92ff19a2d5 100644 --- a/internal/manifests/collector/route.go +++ b/internal/manifests/collector/route.go @@ -20,6 +20,7 @@ import ( routev1 "github.com/openshift/api/route/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/open-telemetry/opentelemetry-operator/apis/v1beta1" "github.com/open-telemetry/opentelemetry-operator/internal/autodetect/openshift" @@ -27,7 +28,7 @@ import ( "github.com/open-telemetry/opentelemetry-operator/internal/naming" ) -func Routes(params manifests.Params) ([]*routev1.Route, error) { +func Routes(params manifests.Params) ([]client.Object, error) { if params.OtelCol.Spec.Ingress.Type != v1beta1.IngressTypeRoute || params.Config.OpenShiftRoutesAvailability() != openshift.RoutesAvailable { return nil, nil } @@ -97,5 +98,12 @@ func Routes(params manifests.Params) ([]*routev1.Route, error) { }, } } - return routes, nil + + // Convert []*routev1.Route to []client.Object + result := make([]client.Object, len(routes)) + for i, route := range routes { + result[i] = route + } + + return result, nil } diff --git a/internal/manifests/collector/route_test.go b/internal/manifests/collector/route_test.go index b7ec294e7b..4192338107 100644 --- a/internal/manifests/collector/route_test.go +++ b/internal/manifests/collector/route_test.go @@ -145,9 +145,9 @@ func TestDesiredRoutes(t *testing.T) { routes, err := Routes(params) assert.NoError(t, err) require.Equal(t, 3, len(routes)) - assert.Equal(t, "web.example.com", routes[0].Spec.Host) - assert.Equal(t, "otlp-grpc.example.com", routes[1].Spec.Host) - assert.Equal(t, "otlp-test-grpc.example.com", routes[2].Spec.Host) + assert.Equal(t, "web.example.com", routes[0].(*routev1.Route).Spec.Host) + assert.Equal(t, "otlp-grpc.example.com", routes[1].(*routev1.Route).Spec.Host) + assert.Equal(t, "otlp-test-grpc.example.com", routes[2].(*routev1.Route).Spec.Host) }) t.Run("hostname is not set", func(t *testing.T) { params, err := newParams("something:tag", testFileIngress) @@ -166,9 +166,9 @@ func TestDesiredRoutes(t *testing.T) { routes, err := Routes(params) assert.NoError(t, err) require.Equal(t, 3, len(routes)) - 
assert.Equal(t, "", routes[0].Spec.Host) - assert.Equal(t, "", routes[1].Spec.Host) - assert.Equal(t, "", routes[2].Spec.Host) + assert.Equal(t, "", routes[0].(*routev1.Route).Spec.Host) + assert.Equal(t, "", routes[1].(*routev1.Route).Spec.Host) + assert.Equal(t, "", routes[2].(*routev1.Route).Spec.Host) }) } diff --git a/internal/manifests/mutate.go b/internal/manifests/mutate.go index fda0e22dbb..583e2d48cc 100644 --- a/internal/manifests/mutate.go +++ b/internal/manifests/mutate.go @@ -235,12 +235,18 @@ func mutateRole(existing, desired *rbacv1.Role) { existing.Annotations = desired.Annotations existing.Labels = desired.Labels existing.Rules = desired.Rules + // This role can exists in a different namespace than the otel collector, so we need to remove the owner references + // since cross namespace owner references are not allowed + existing.SetOwnerReferences(nil) } func mutateRoleBinding(existing, desired *rbacv1.RoleBinding) { existing.Annotations = desired.Annotations existing.Labels = desired.Labels existing.Subjects = desired.Subjects + // This role binding can exists in a different namespace than the otel collector, so we need to remove the owner references + // since cross namespace owner references are not allowed + existing.SetOwnerReferences(nil) } func mutateAutoscalingHPA(existing, desired *autoscalingv2.HorizontalPodAutoscaler) { diff --git a/internal/naming/main.go b/internal/naming/main.go index 149a9f9d5a..e654fb84a5 100644 --- a/internal/naming/main.go +++ b/internal/naming/main.go @@ -146,6 +146,16 @@ func ClusterRoleBinding(otelcol, namespace string) string { return DNSName(Truncate("%s-%s-collector", 63, otelcol, namespace)) } +// Role builds the role name based on the instance. +func Role(otelcol string, roleName string) string { + return DNSName(Truncate("%s-%s-role", 63, otelcol, roleName)) +} + +// RoleBinding builds the role binding name based on the instance. +func RoleBinding(otelcol, roleName string) string { + return DNSName(Truncate("%s-%s-role-binding", 63, otelcol, roleName)) +} + // TAService returns the name to use for the TargetAllocator service. 
func TAService(taName string) string { return DNSName(Truncate("%s-targetallocator", 63, taName)) diff --git a/tests/e2e-automatic-rbac/extra-permissions-operator/endpoints.yaml b/tests/e2e-automatic-rbac/extra-permissions-operator/endpoints.yaml new file mode 100644 index 0000000000..83be62eff8 --- /dev/null +++ b/tests/e2e-automatic-rbac/extra-permissions-operator/endpoints.yaml @@ -0,0 +1,11 @@ +- op: add + path: /rules/- + value: + apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - list + - watch diff --git a/tests/e2e-automatic-rbac/extra-permissions-operator/rbac.yaml b/tests/e2e-automatic-rbac/extra-permissions-operator/rbac.yaml index c36e0c2213..3e44d47f05 100644 --- a/tests/e2e-automatic-rbac/extra-permissions-operator/rbac.yaml +++ b/tests/e2e-automatic-rbac/extra-permissions-operator/rbac.yaml @@ -6,6 +6,8 @@ resources: - clusterrolebindings - clusterroles + - rolebindings + - roles verbs: - create - delete diff --git a/tests/e2e-automatic-rbac/receiver-prometheus/00-install.yaml b/tests/e2e-automatic-rbac/receiver-prometheus/00-install.yaml new file mode 100644 index 0000000000..97c7175979 --- /dev/null +++ b/tests/e2e-automatic-rbac/receiver-prometheus/00-install.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: chainsaw-prometheus-receiver +--- +apiVersion: v1 +kind: Namespace +metadata: + name: chainsaw-prometheus-workload diff --git a/tests/e2e-automatic-rbac/receiver-prometheus/01-assert.yaml b/tests/e2e-automatic-rbac/receiver-prometheus/01-assert.yaml new file mode 100644 index 0000000000..5eda67fa17 --- /dev/null +++ b/tests/e2e-automatic-rbac/receiver-prometheus/01-assert.yaml @@ -0,0 +1,28 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: simplest-test-prometheus-prometheus-role-binding + namespace: chainsaw-prometheus-workload +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: simplest-test-prometheus-prometheus-role +subjects: +- kind: ServiceAccount + name: simplest-collector + namespace: chainsaw-prometheus-receiver +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: simplest-test-prometheus-prometheus-role + namespace: chainsaw-prometheus-workload +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - watch + - list \ No newline at end of file diff --git a/tests/e2e-automatic-rbac/receiver-prometheus/01-install.yaml b/tests/e2e-automatic-rbac/receiver-prometheus/01-install.yaml new file mode 100644 index 0000000000..42cf75c70b --- /dev/null +++ b/tests/e2e-automatic-rbac/receiver-prometheus/01-install.yaml @@ -0,0 +1,25 @@ +apiVersion: opentelemetry.io/v1beta1 +kind: OpenTelemetryCollector +metadata: + name: simplest + namespace: chainsaw-prometheus-receiver +spec: + config: + receivers: + prometheus: + config: + scrape_configs: + - job_name: test-prometheus + kubernetes_sd_configs: + - role: pod + namespaces: + names: + - chainsaw-prometheus-workload + exporters: + debug: {} + service: + pipelines: + metrics: + receivers: [prometheus] + processors: [] + exporters: [debug] diff --git a/tests/e2e-automatic-rbac/receiver-prometheus/02-assert.yaml b/tests/e2e-automatic-rbac/receiver-prometheus/02-assert.yaml new file mode 100644 index 0000000000..011c44e028 --- /dev/null +++ b/tests/e2e-automatic-rbac/receiver-prometheus/02-assert.yaml @@ -0,0 +1,28 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: simplest-test-prometheus-prometheus-role-binding + namespace: chainsaw-prometheus-workload +roleRef: + 
apiGroup: rbac.authorization.k8s.io + kind: Role + name: simplest-test-prometheus-prometheus-role +subjects: +- kind: ServiceAccount + name: simplest-collector + namespace: chainsaw-prometheus-receiver +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: simplest-test-prometheus-prometheus-role + namespace: chainsaw-prometheus-workload +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list \ No newline at end of file diff --git a/tests/e2e-automatic-rbac/receiver-prometheus/02-install.yaml b/tests/e2e-automatic-rbac/receiver-prometheus/02-install.yaml new file mode 100644 index 0000000000..3f765015d6 --- /dev/null +++ b/tests/e2e-automatic-rbac/receiver-prometheus/02-install.yaml @@ -0,0 +1,25 @@ +apiVersion: opentelemetry.io/v1beta1 +kind: OpenTelemetryCollector +metadata: + name: simplest + namespace: chainsaw-prometheus-receiver +spec: + config: + receivers: + prometheus: + config: + scrape_configs: + - job_name: test-prometheus + kubernetes_sd_configs: + - role: node + namespaces: + names: + - chainsaw-prometheus-workload + exporters: + debug: {} + service: + pipelines: + metrics: + receivers: [prometheus] + processors: [] + exporters: [debug] diff --git a/tests/e2e-automatic-rbac/receiver-prometheus/03-assert.yaml b/tests/e2e-automatic-rbac/receiver-prometheus/03-assert.yaml new file mode 100644 index 0000000000..49efa3123a --- /dev/null +++ b/tests/e2e-automatic-rbac/receiver-prometheus/03-assert.yaml @@ -0,0 +1,28 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: simplest-test-prometheus-prometheus-role-binding + namespace: chainsaw-prometheus-workload +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: simplest-test-prometheus-prometheus-role +subjects: +- kind: ServiceAccount + name: simplest-collector + namespace: chainsaw-prometheus-receiver +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: simplest-test-prometheus-prometheus-role + namespace: chainsaw-prometheus-workload +rules: +- apiGroups: + - "" + resources: + - services + verbs: + - get + - watch + - list \ No newline at end of file diff --git a/tests/e2e-automatic-rbac/receiver-prometheus/03-install.yaml b/tests/e2e-automatic-rbac/receiver-prometheus/03-install.yaml new file mode 100644 index 0000000000..436ec899ab --- /dev/null +++ b/tests/e2e-automatic-rbac/receiver-prometheus/03-install.yaml @@ -0,0 +1,25 @@ +apiVersion: opentelemetry.io/v1beta1 +kind: OpenTelemetryCollector +metadata: + name: simplest + namespace: chainsaw-prometheus-receiver +spec: + config: + receivers: + prometheus: + config: + scrape_configs: + - job_name: test-prometheus + kubernetes_sd_configs: + - role: service + namespaces: + names: + - chainsaw-prometheus-workload + exporters: + debug: {} + service: + pipelines: + metrics: + receivers: [prometheus] + processors: [] + exporters: [debug] diff --git a/tests/e2e-automatic-rbac/receiver-prometheus/04-assert.yaml b/tests/e2e-automatic-rbac/receiver-prometheus/04-assert.yaml new file mode 100644 index 0000000000..be1fe2032e --- /dev/null +++ b/tests/e2e-automatic-rbac/receiver-prometheus/04-assert.yaml @@ -0,0 +1,29 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: simplest-test-prometheus-prometheus-role-binding + namespace: chainsaw-prometheus-workload +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: simplest-test-prometheus-prometheus-role +subjects: +- kind: ServiceAccount + name: simplest-collector + 
namespace: chainsaw-prometheus-receiver +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: simplest-test-prometheus-prometheus-role + namespace: chainsaw-prometheus-workload +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + verbs: + - get + - watch + - list \ No newline at end of file diff --git a/tests/e2e-automatic-rbac/receiver-prometheus/04-install.yaml b/tests/e2e-automatic-rbac/receiver-prometheus/04-install.yaml new file mode 100644 index 0000000000..552d86e474 --- /dev/null +++ b/tests/e2e-automatic-rbac/receiver-prometheus/04-install.yaml @@ -0,0 +1,25 @@ +apiVersion: opentelemetry.io/v1beta1 +kind: OpenTelemetryCollector +metadata: + name: simplest + namespace: chainsaw-prometheus-receiver +spec: + config: + receivers: + prometheus: + config: + scrape_configs: + - job_name: test-prometheus + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - chainsaw-prometheus-workload + exporters: + debug: {} + service: + pipelines: + metrics: + receivers: [prometheus] + processors: [] + exporters: [debug] diff --git a/tests/e2e-automatic-rbac/receiver-prometheus/05-assert.yaml b/tests/e2e-automatic-rbac/receiver-prometheus/05-assert.yaml new file mode 100644 index 0000000000..7b42167122 --- /dev/null +++ b/tests/e2e-automatic-rbac/receiver-prometheus/05-assert.yaml @@ -0,0 +1,28 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: simplest-test-prometheus-prometheus-role-binding + namespace: chainsaw-prometheus-workload +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: simplest-test-prometheus-prometheus-role +subjects: +- kind: ServiceAccount + name: simplest-collector + namespace: chainsaw-prometheus-receiver +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: simplest-test-prometheus-prometheus-role + namespace: chainsaw-prometheus-workload +rules: +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - watch + - list \ No newline at end of file diff --git a/tests/e2e-automatic-rbac/receiver-prometheus/05-install.yaml b/tests/e2e-automatic-rbac/receiver-prometheus/05-install.yaml new file mode 100644 index 0000000000..621c3c6ba0 --- /dev/null +++ b/tests/e2e-automatic-rbac/receiver-prometheus/05-install.yaml @@ -0,0 +1,25 @@ +apiVersion: opentelemetry.io/v1beta1 +kind: OpenTelemetryCollector +metadata: + name: simplest + namespace: chainsaw-prometheus-receiver +spec: + config: + receivers: + prometheus: + config: + scrape_configs: + - job_name: test-prometheus + kubernetes_sd_configs: + - role: ingress + namespaces: + names: + - chainsaw-prometheus-workload + exporters: + debug: {} + service: + pipelines: + metrics: + receivers: [prometheus] + processors: [] + exporters: [debug] diff --git a/tests/e2e-automatic-rbac/receiver-prometheus/06-delete.yaml b/tests/e2e-automatic-rbac/receiver-prometheus/06-delete.yaml new file mode 100644 index 0000000000..455e637877 --- /dev/null +++ b/tests/e2e-automatic-rbac/receiver-prometheus/06-delete.yaml @@ -0,0 +1,5 @@ +apiVersion: opentelemetry.io/v1beta1 +kind: OpenTelemetryCollector +metadata: + name: simplest + namespace: chainsaw-prometheus-receiver diff --git a/tests/e2e-automatic-rbac/receiver-prometheus/06-error.yaml b/tests/e2e-automatic-rbac/receiver-prometheus/06-error.yaml new file mode 100644 index 0000000000..3b9c716887 --- /dev/null +++ b/tests/e2e-automatic-rbac/receiver-prometheus/06-error.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding 
+metadata: + name: simplest-test-prometheus-prometheus-role-binding + namespace: chainsaw-prometheus-workload +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: simplest-test-prometheus-prometheus-role + namespace: chainsaw-prometheus-workload diff --git a/tests/e2e-automatic-rbac/receiver-prometheus/chainsaw-test.yaml b/tests/e2e-automatic-rbac/receiver-prometheus/chainsaw-test.yaml new file mode 100644 index 0000000000..d5cfe20b00 --- /dev/null +++ b/tests/e2e-automatic-rbac/receiver-prometheus/chainsaw-test.yaml @@ -0,0 +1,48 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: prometheus-receiver +spec: + steps: + - name: create namespaces + try: + - apply: + file: 00-install.yaml + - name: pod-role + try: + - apply: + file: 01-install.yaml + - assert: + file: 01-assert.yaml + - name: node-role + try: + - apply: + file: 02-install.yaml + - assert: + file: 02-assert.yaml + - name: service-role + try: + - apply: + file: 03-install.yaml + - assert: + file: 03-assert.yaml + - name: endpoint-role + try: + - apply: + file: 04-install.yaml + - assert: + file: 04-assert.yaml + - name: ingress-role + try: + - apply: + file: 05-install.yaml + - assert: + file: 05-assert.yaml + - name: check-removed + try: + - delete: + file: 06-delete.yaml + - error: + file: 06-error.yaml From 8c15d2638ff3b7de3a20177a9420e064fed1444a Mon Sep 17 00:00:00 2001 From: Israel Blancas Date: Thu, 9 Jan 2025 12:27:53 +0100 Subject: [PATCH 2/2] Fix labels Signed-off-by: Israel Blancas --- internal/manifests/collector/rbac.go | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/internal/manifests/collector/rbac.go b/internal/manifests/collector/rbac.go index 6649054487..c0102ad18d 100644 --- a/internal/manifests/collector/rbac.go +++ b/internal/manifests/collector/rbac.go @@ -97,9 +97,19 @@ func Role(params manifests.Params) ([]client.Object, error) { return nil, err } + name := naming.Role(params.OtelCol.Name, params.OtelCol.Namespace) + + labels := manifestutils.Labels(params.OtelCol.ObjectMeta, name, params.OtelCol.Spec.Image, ComponentOpenTelemetryCollector, params.Config.LabelsFilter()) + annotations, err := manifestutils.Annotations(params.OtelCol, params.Config.AnnotationsFilter()) + if err != nil { + return nil, err + } + // Convert []*rbacv1.Role to []client.Object result := make([]client.Object, len(roles)) for i, role := range roles { + role.ObjectMeta.Labels = labels + role.ObjectMeta.Annotations = annotations result[i] = role } @@ -114,20 +124,20 @@ func RoleBinding(params manifests.Params) ([]client.Object, error) { return nil, nil } + name := naming.RoleBinding(params.OtelCol.Name, params.OtelCol.Namespace) + + labels := manifestutils.Labels(params.OtelCol.ObjectMeta, name, params.OtelCol.Spec.Image, ComponentOpenTelemetryCollector, params.Config.LabelsFilter()) annotations, err := manifestutils.Annotations(params.OtelCol, params.Config.AnnotationsFilter()) if err != nil { return nil, err } - for _, rb := range rbs { - rb.ObjectMeta.Labels = manifestutils.Labels(params.OtelCol.ObjectMeta, rb.ObjectMeta.Name, params.OtelCol.Spec.Image, ComponentOpenTelemetryCollector, params.Config.LabelsFilter()) - rb.ObjectMeta.Annotations = annotations - } - // Convert []*rbacv1.RoleBinding to []client.Object result := make([]client.Object, len(rbs)) - for i, role := range rbs { - 
result[i] = role + for i, rb := range rbs { + rb.ObjectMeta.Labels = labels + rb.ObjectMeta.Annotations = annotations + result[i] = rb } return result, nil
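
For illustration, a condensed sketch of how the new slice factories are consumed. It restates the loop added to internal/manifests/collector/collector.go above rather than introducing any new API; the helper name buildNamespacedRBAC is hypothetical.

	package collector

	import (
		"sigs.k8s.io/controller-runtime/pkg/client"

		"github.com/open-telemetry/opentelemetry-operator/internal/manifests"
	)

	// buildNamespacedRBAC mirrors the Build loop: each slice factory may
	// return zero objects (e.g. no prometheus receiver with
	// kubernetes_sd_configs, or no OpenShift routes), in which case
	// nothing is appended to the manifest list.
	func buildNamespacedRBAC(params manifests.Params) ([]client.Object, error) {
		factories := []manifests.K8sManifestSliceFactory[manifests.Params]{
			manifests.FactorySlice(Role),
			manifests.FactorySlice(RoleBinding),
			manifests.FactorySlice(Routes),
		}
		var objs []client.Object
		for _, factory := range factories {
			out, err := factory(params)
			if err != nil {
				return nil, err
			}
			objs = append(objs, out...)
		}
		return objs, nil
	}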