Skip to content

monitor secret resources only if necessary #2550

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Mar 15, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 0 additions & 8 deletions config/rbac/role.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -52,14 +52,6 @@ rules:
verbs:
- patch
- update
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
Expand Down
3 changes: 2 additions & 1 deletion controllers/ingress/eventhandlers/secret_events.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,8 @@ func (h *enqueueRequestsForSecretEvent) Delete(e event.DeleteEvent, _ workqueue.
}

func (h *enqueueRequestsForSecretEvent) Generic(e event.GenericEvent, _ workqueue.RateLimitingInterface) {
	// Generic events are delivered for secrets via the secrets manager's event
	// channel; treat them like any other secret change and enqueue the
	// ingresses impacted by this secret.
	secretObj := e.Object.(*corev1.Secret)
	h.enqueueImpactedObjects(secretObj)
}

func (h *enqueueRequestsForSecretEvent) enqueueImpactedObjects(secret *corev1.Secret) {
Expand Down
13 changes: 8 additions & 5 deletions controllers/ingress/group_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ type groupReconciler struct {
stackMarshaller deploy.StackMarshaller
stackDeployer deploy.StackDeployer
backendSGProvider networkingpkg.BackendSGProvider
secretsManager k8s.SecretsManager

groupLoader ingress.GroupLoader
groupFinalizerManager ingress.FinalizerManager
Expand Down Expand Up @@ -160,7 +161,7 @@ func (r *groupReconciler) reconcile(ctx context.Context, req ctrl.Request) error
}

func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingress.Group) (core.Stack, *elbv2model.LoadBalancer, error) {
stack, lb, err := r.modelBuilder.Build(ctx, ingGroup)
stack, lb, secrets, err := r.modelBuilder.Build(ctx, ingGroup)
if err != nil {
r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedBuildModel, fmt.Sprintf("Failed build model due to %v", err))
return nil, nil, err
Expand All @@ -177,6 +178,7 @@ func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingr
return nil, nil, err
}
r.logger.Info("successfully deployed model", "ingressGroup", ingGroup.ID)
r.secretsManager.MonitorSecrets(ingGroup.ID.String(), secrets)
return stack, lb, err
}

Expand Down Expand Up @@ -229,7 +231,7 @@ func (r *groupReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager
if err := r.setupIndexes(ctx, mgr.GetFieldIndexer(), ingressClassResourceAvailable); err != nil {
return err
}
if err := r.setupWatches(ctx, c, ingressClassResourceAvailable); err != nil {
if err := r.setupWatches(ctx, c, ingressClassResourceAvailable, clientSet); err != nil {
return err
}
return nil
Expand Down Expand Up @@ -276,9 +278,10 @@ func (r *groupReconciler) setupIndexes(ctx context.Context, fieldIndexer client.
return nil
}

func (r *groupReconciler) setupWatches(_ context.Context, c controller.Controller, ingressClassResourceAvailable bool) error {
func (r *groupReconciler) setupWatches(_ context.Context, c controller.Controller, ingressClassResourceAvailable bool, clientSet *kubernetes.Clientset) error {
ingEventChan := make(chan event.GenericEvent)
svcEventChan := make(chan event.GenericEvent)
secretEventsChan := make(chan event.GenericEvent)
ingEventHandler := eventhandlers.NewEnqueueRequestsForIngressEvent(r.groupLoader, r.eventRecorder,
r.logger.WithName("eventHandlers").WithName("ingress"))
svcEventHandler := eventhandlers.NewEnqueueRequestsForServiceEvent(ingEventChan, r.k8sClient, r.eventRecorder,
Expand All @@ -297,10 +300,9 @@ func (r *groupReconciler) setupWatches(_ context.Context, c controller.Controlle
if err := c.Watch(&source.Kind{Type: &corev1.Service{}}, svcEventHandler); err != nil {
return err
}
if err := c.Watch(&source.Kind{Type: &corev1.Secret{}}, secretEventHandler); err != nil {
if err := c.Watch(&source.Channel{Source: secretEventsChan}, secretEventHandler); err != nil {
return err
}

if ingressClassResourceAvailable {
ingClassEventChan := make(chan event.GenericEvent)
ingClassParamsEventHandler := eventhandlers.NewEnqueueRequestsForIngressClassParamsEvent(ingClassEventChan, r.k8sClient, r.eventRecorder,
Expand All @@ -317,6 +319,7 @@ func (r *groupReconciler) setupWatches(_ context.Context, c controller.Controlle
return err
}
}
r.secretsManager = k8s.NewSecretsManager(clientSet, secretEventsChan, ctrl.Log.WithName("secrets-manager"))
return nil
}

Expand Down
1 change: 1 addition & 0 deletions helm/aws-load-balancer-controller/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -234,3 +234,4 @@ The default values set by the application itself can be confirmed [here](https:/
| `serviceMonitor.enabled` | Specifies whether a service monitor should be created, requires the ServiceMonitor CRD to be installed | `false` |
| `serviceMonitor.additionalLabels` | Labels to add to the service account | `{}` |
| `serviceMonitor.interval` | Prometheus scrape interval | `1m` |
| `clusterSecretsPermissions.allowAllSecrets` | If `true`, controller has access to all secrets in the cluster. | `false` |
7 changes: 6 additions & 1 deletion helm/aws-load-balancer-controller/templates/rbac.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,13 @@ rules:
resources: [services, ingresses]
verbs: [get, list, patch, update, watch]
- apiGroups: [""]
resources: [nodes, secrets, namespaces, endpoints]
resources: [nodes, namespaces, endpoints]
verbs: [get, list, watch]
{{- if .Values.clusterSecretsPermissions.allowAllSecrets }}
- apiGroups: [""]
resources: [secrets]
verbs: [get, list, watch]
{{- end }}
- apiGroups: ["elbv2.k8s.aws", "", "extensions", "networking.k8s.io"]
resources: [targetgroupbindings/status, pods/status, services/status, ingresses/status]
verbs: [update, patch]
Expand Down
9 changes: 9 additions & 0 deletions helm/aws-load-balancer-controller/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -266,3 +266,12 @@ serviceMonitor:
additionalLabels: {}
# Prometheus scrape interval
interval: 1m

# clusterSecretsPermissions lets you configure RBAC permissions for secret resources
# Access to secrets resource is required only if you use the OIDC feature, and instead of
# enabling access to all secrets, we recommend configuring namespaced role/rolebinding.
# This option is for backwards compatibility only, and will potentially be deprecated in future.
clusterSecretsPermissions:
# allowAllSecrets allows the controller to access all secrets in the cluster.
# This is to get backwards compatible behavior, but *NOT* recommended for security reasons
allowAllSecrets: false
5 changes: 4 additions & 1 deletion pkg/config/runtime_config.go
Original file line number Diff line number Diff line change
@@ -1,14 +1,16 @@
package config

import (
"time"

"github.com/spf13/pflag"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/leaderelection/resourcelock"
ctrl "sigs.k8s.io/controller-runtime"
"time"
"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
Expand Down Expand Up @@ -121,6 +123,7 @@ func BuildRuntimeOptions(rtCfg RuntimeConfig, scheme *runtime.Scheme) ctrl.Optio
LeaderElectionNamespace: rtCfg.LeaderElectionNamespace,
Namespace: rtCfg.WatchNamespace,
SyncPeriod: &rtCfg.SyncPeriod,
ClientDisableCacheFor: []client.Object{&corev1.Secret{}},
}
}

Expand Down
2 changes: 1 addition & 1 deletion pkg/ingress/model_build_actions.go
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,6 @@ func (t *defaultModelBuildTask) buildAuthenticateOIDCAction(ctx context.Context,
if err := t.k8sClient.Get(ctx, secretKey, secret); err != nil {
return elbv2model.Action{}, err
}

rawClientID, ok := secret.Data["clientID"]
// AWSALBIngressController looks for clientId, we should be backwards-compatible here.
if !ok {
Expand All @@ -186,6 +185,7 @@ func (t *defaultModelBuildTask) buildAuthenticateOIDCAction(ctx context.Context,
return elbv2model.Action{}, errors.Errorf("missing clientSecret, secret: %v", secretKey)
}

t.secretKeys = append(t.secretKeys, secretKey)
clientID := strings.TrimRightFunc(string(rawClientID), unicode.IsSpace)
clientSecret := string(rawClientSecret)
return elbv2model.Action{
Expand Down
9 changes: 5 additions & 4 deletions pkg/ingress/model_builder.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ const (
// ModelBuilder is responsible for building the model stack for an IngressGroup.
type ModelBuilder interface {
// Build builds the model stack for an IngressGroup.
Build(ctx context.Context, ingGroup Group) (core.Stack, *elbv2model.LoadBalancer, error)
Build(ctx context.Context, ingGroup Group) (core.Stack, *elbv2model.LoadBalancer, []types.NamespacedName, error)
}

// NewDefaultModelBuilder constructs new defaultModelBuilder.
Expand Down Expand Up @@ -97,7 +97,7 @@ type defaultModelBuilder struct {
}

// Build builds the model stack for an IngressGroup.
func (b *defaultModelBuilder) Build(ctx context.Context, ingGroup Group) (core.Stack, *elbv2model.LoadBalancer, error) {
func (b *defaultModelBuilder) Build(ctx context.Context, ingGroup Group) (core.Stack, *elbv2model.LoadBalancer, []types.NamespacedName, error) {
stack := core.NewDefaultStack(core.StackID(ingGroup.ID))
task := &defaultModelBuildTask{
k8sClient: b.k8sClient,
Expand Down Expand Up @@ -143,9 +143,9 @@ func (b *defaultModelBuilder) Build(ctx context.Context, ingGroup Group) (core.S
backendServices: make(map[types.NamespacedName]*corev1.Service),
}
if err := task.run(ctx); err != nil {
return nil, nil, err
return nil, nil, nil, err
}
return task.stack, task.loadBalancer, nil
return task.stack, task.loadBalancer, task.secretKeys, nil
}

// the default model build task
Expand Down Expand Up @@ -193,6 +193,7 @@ type defaultModelBuildTask struct {
loadBalancer *elbv2model.LoadBalancer
tgByResID map[string]*elbv2model.TargetGroup
backendServices map[types.NamespacedName]*corev1.Service
secretKeys []types.NamespacedName
}

func (t *defaultModelBuildTask) run(ctx context.Context) error {
Expand Down
2 changes: 1 addition & 1 deletion pkg/ingress/model_builder_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3900,7 +3900,7 @@ func Test_defaultModelBuilder_Build(t *testing.T) {
defaultSSLPolicy: "ELBSecurityPolicy-2016-08",
}

gotStack, _, err := b.Build(context.Background(), tt.args.ingGroup)
gotStack, _, _, err := b.Build(context.Background(), tt.args.ingGroup)
if tt.wantErr != nil {
assert.EqualError(t, err, tt.wantErr.Error())
} else {
Expand Down
133 changes: 133 additions & 0 deletions pkg/k8s/secrets_manager.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
package k8s

import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/event"
"sync"

"github.com/go-logr/logr"

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
)

// SecretsManager manages the secret resources needed by the controller.
// It maintains an individual watch per referenced secret instead of a
// cluster-wide secrets watch.
type SecretsManager interface {
	// MonitorSecrets manages the individual watches for the given secrets.
	// ingressGroupID identifies the consumer; secrets is the complete set the
	// group currently references — watches for secrets no longer in this set
	// are released for that group.
	MonitorSecrets(ingressGroupID string, secrets []types.NamespacedName)
}

// NewSecretsManager constructs a secrets manager that starts per-secret
// watches on demand and forwards change notifications to secretsEventChan.
func NewSecretsManager(clientSet kubernetes.Interface, secretsEventChan chan<- event.GenericEvent, logger logr.Logger) *defaultSecretsManager {
	// mutex is usable as its zero value, so no explicit sync.Mutex{} init.
	return &defaultSecretsManager{
		secretMap:        make(map[types.NamespacedName]*secretItem),
		secretsEventChan: secretsEventChan,
		clientSet:        clientSet,
		logger:           logger,
	}
}

// Compile-time check that defaultSecretsManager satisfies SecretsManager.
var _ SecretsManager = &defaultSecretsManager{}

// defaultSecretsManager implements SecretsManager by running one
// reflector-backed watch per monitored secret and publishing change
// notifications to secretsEventChan.
type defaultSecretsManager struct {
	// mutex guards secretMap against concurrent MonitorSecrets calls.
	mutex sync.Mutex
	// secretMap holds the active watch bookkeeping, keyed by secret.
	secretMap map[types.NamespacedName]*secretItem
	// secretsEventChan carries generic events for monitored secrets.
	secretsEventChan chan<- event.GenericEvent
	// clientSet is used to list/watch individual secrets.
	clientSet kubernetes.Interface
	// NOTE(review): queue appears unused in this file — confirm before
	// removing (dropping it would also orphan the workqueue import).
	queue workqueue.RateLimitingInterface
	logger logr.Logger
}

// secretItem is the per-secret bookkeeping: the reflector watching the
// secret, its backing store, and the ingress groups consuming the secret.
type secretItem struct {
	// store caches the watched secret object, populated by the reflector.
	store cache.Store
	// rt is the reflector performing the single-secret list/watch.
	rt *cache.Reflector
	// ingresses holds the IDs of ingress groups referencing this secret.
	ingresses sets.String

	// stopCh stops the reflector goroutine when closed.
	stopCh chan struct{}
}

// MonitorSecrets reconciles the watches for the given ingress group: it
// starts a watch for every newly referenced secret, records the group as a
// consumer of each, and stops watches for secrets no longer referenced by
// any group.
func (m *defaultSecretsManager) MonitorSecrets(ingressGroupID string, secrets []types.NamespacedName) {
	m.logger.V(1).Info("Monitoring secrets", "groupID", ingressGroupID, "secrets", secrets)
	m.mutex.Lock()
	defer m.mutex.Unlock()

	inputSecrets := make(sets.String)
	for _, secret := range secrets {
		inputSecrets.Insert(secret.String())
		item, exists := m.secretMap[secret]
		if !exists {
			m.logger.V(1).Info("secret is not being monitored, adding watch", "item", secret)
			item = m.newReflector(secret.Namespace, secret.Name)
			m.secretMap[secret] = item
		}
		item.ingresses.Insert(ingressGroupID)
	}

	// Garbage-collect watches for secrets this group no longer references.
	var cleanupSecrets []types.NamespacedName
	for secret, item := range m.secretMap {
		if inputSecrets.Has(secret.String()) {
			continue
		}
		// sets.String.Delete is a no-op when absent, so no Has guard needed.
		item.ingresses.Delete(ingressGroupID)
		if item.ingresses.Len() == 0 {
			cleanupSecrets = append(cleanupSecrets, secret)
		}
	}
	for _, secret := range cleanupSecrets {
		m.logger.V(1).Info("secret no longer needs monitoring, stopping the watch", "item", secret)
		m.secretMap[secret].stopReflector()
		delete(m.secretMap, secret)
	}
}

// newReflector starts a dedicated reflector that list/watches the single
// secret identified by namespace/name, and returns the bookkeeping item
// holding its store, reflector, and stop channel.
func (m *defaultSecretsManager) newReflector(namespace, name string) *secretItem {
	// Restrict the list/watch to exactly this one secret.
	selector := fields.Set{"metadata.name": name}.AsSelector().String()
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = selector
			return m.clientSet.CoreV1().Secrets(namespace).List(context.TODO(), options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = selector
			return m.clientSet.CoreV1().Secrets(namespace).Watch(context.TODO(), options)
		},
	}
	store := m.newStore()
	item := &secretItem{
		store: store,
		rt: cache.NewNamedReflector(
			fmt.Sprintf("secret-%s/%s", namespace, name),
			lw,
			&corev1.Secret{},
			store,
			0, // no resync; rely on watch events only
		),
		ingresses: make(sets.String),
		stopCh:    make(chan struct{}),
	}
	go item.startReflector()
	return item
}

// newStore builds the cache.Store backing a secret reflector; presumably the
// SecretsStore publishes add/update/delete notifications to
// secretsEventChan — see NewSecretsStore for the actual behavior.
func (m *defaultSecretsManager) newStore() *SecretsStore {
	return NewSecretsStore(m.secretsEventChan, cache.MetaNamespaceKeyFunc, m.logger)
}

// stopReflector signals the reflector goroutine to exit by closing stopCh.
// Must be called at most once; a second close would panic.
func (s *secretItem) stopReflector() {
	close(s.stopCh)
}

// startReflector runs the reflector until stopCh is closed; intended to be
// launched on its own goroutine.
func (s *secretItem) startReflector() {
	s.rt.Run(s.stopCh)
}
Loading