
Commit 9b99679

Merge branch 'main' into TGB_readiness
2 parents 28a7d16 + 779508f

30 files changed: +2830 -1428 lines

config/rbac/role.yaml

Lines changed: 0 additions & 8 deletions
@@ -52,14 +52,6 @@ rules:
   verbs:
   - patch
   - update
-- apiGroups:
-  - ""
-  resources:
-  - secrets
-  verbs:
-  - get
-  - list
-  - watch
 - apiGroups:
   - ""
   resources:

controllers/elbv2/eventhandlers/node.go

Lines changed: 39 additions & 13 deletions
@@ -55,47 +55,54 @@ func (h *enqueueRequestsForNodeEvent) Generic(e event.GenericEvent, queue workqu
     // nothing to do here
 }
 
-// enqueueImpactedEndpointBindings will enqueue all impacted TargetGroupBindings for node events.
+// enqueueImpactedTargetGroupBindings will enqueue all impacted TargetGroupBindings for node events.
 func (h *enqueueRequestsForNodeEvent) enqueueImpactedTargetGroupBindings(queue workqueue.RateLimitingInterface, nodeOld *corev1.Node, nodeNew *corev1.Node) {
     var nodeKey types.NamespacedName
-    nodeOldIsReady := false
-    nodeNewIsReady := false
+    nodeOldSuitableAsTrafficProxy := false
+    nodeNewSuitableAsTrafficProxy := false
+    nodeOldReadyCondStatus := corev1.ConditionFalse
+    nodeNewReadyCondStatus := corev1.ConditionFalse
     if nodeOld != nil {
         nodeKey = k8s.NamespacedName(nodeOld)
-        nodeOldIsReady = k8s.IsNodeSuitableAsTrafficProxy(nodeOld)
+        nodeOldSuitableAsTrafficProxy = backend.IsNodeSuitableAsTrafficProxy(nodeOld)
+        if readyCond := k8s.GetNodeCondition(nodeOld, corev1.NodeReady); readyCond != nil {
+            nodeOldReadyCondStatus = readyCond.Status
+        }
     }
     if nodeNew != nil {
         nodeKey = k8s.NamespacedName(nodeNew)
-        nodeNewIsReady = k8s.IsNodeSuitableAsTrafficProxy(nodeNew)
+        nodeNewSuitableAsTrafficProxy = backend.IsNodeSuitableAsTrafficProxy(nodeNew)
+        if readyCond := k8s.GetNodeCondition(nodeNew, corev1.NodeReady); readyCond != nil {
+            nodeNewReadyCondStatus = readyCond.Status
+        }
     }
 
     tgbList := &elbv2api.TargetGroupBindingList{}
     if err := h.k8sClient.List(context.Background(), tgbList); err != nil {
-        h.logger.Error(err, "failed to fetch targetGroupBindings")
+        h.logger.Error(err, "[this should never happen] failed to fetch targetGroupBindings")
         return
     }
 
     for _, tgb := range tgbList.Items {
         if tgb.Spec.TargetType == nil || (*tgb.Spec.TargetType) != elbv2api.TargetTypeInstance {
             continue
         }
-
         nodeSelector, err := backend.GetTrafficProxyNodeSelector(&tgb)
         if err != nil {
             h.logger.Error(err, "failed to get nodeSelector", "TargetGroupBinding", tgb)
-            return
+            continue
         }
 
-        nodeOldIsTrafficProxy := false
-        nodeNewIsTrafficProxy := false
+        nodeOldSuitableAsTrafficProxyForTGB := false
+        nodeNewSuitableAsTrafficProxyForTGB := false
         if nodeOld != nil {
-            nodeOldIsTrafficProxy = nodeOldIsReady && nodeSelector.Matches(labels.Set(nodeOld.Labels))
+            nodeOldSuitableAsTrafficProxyForTGB = nodeOldSuitableAsTrafficProxy && nodeSelector.Matches(labels.Set(nodeOld.Labels))
         }
         if nodeNew != nil {
-            nodeNewIsTrafficProxy = nodeNewIsReady && nodeSelector.Matches(labels.Set(nodeNew.Labels))
+            nodeNewSuitableAsTrafficProxyForTGB = nodeNewSuitableAsTrafficProxy && nodeSelector.Matches(labels.Set(nodeNew.Labels))
         }
 
-        if nodeOldIsTrafficProxy != nodeNewIsTrafficProxy {
+        if h.shouldEnqueueTGBDueToNodeEvent(nodeOldSuitableAsTrafficProxyForTGB, nodeOldReadyCondStatus, nodeNewSuitableAsTrafficProxyForTGB, nodeNewReadyCondStatus) {
             h.logger.V(1).Info("enqueue targetGroupBinding for node event",
                 "node", nodeKey,
                 "targetGroupBinding", k8s.NamespacedName(&tgb),
@@ -109,3 +116,22 @@ func (h *enqueueRequestsForNodeEvent) enqueueImpactedTargetGroupBindings(queue w
         }
     }
 }
+
+// shouldEnqueueTGBDueToNodeEvent checks whether a TGB should be queued due to node event.
+func (h *enqueueRequestsForNodeEvent) shouldEnqueueTGBDueToNodeEvent(
+    nodeOldSuitableAsTrafficProxyForTGB bool, nodeOldReadyCondStatus corev1.ConditionStatus,
+    nodeNewSuitableAsTrafficProxyForTGB bool, nodeNewReadyCondStatus corev1.ConditionStatus) bool {
+    if nodeOldSuitableAsTrafficProxyForTGB == false && nodeNewSuitableAsTrafficProxyForTGB == false {
+        return false
+    }
+    if nodeOldSuitableAsTrafficProxyForTGB == true && nodeNewSuitableAsTrafficProxyForTGB == true {
+        return nodeOldReadyCondStatus != nodeNewReadyCondStatus
+    }
+    if nodeOldSuitableAsTrafficProxyForTGB == true && nodeNewSuitableAsTrafficProxyForTGB == false {
+        return nodeOldReadyCondStatus != corev1.ConditionFalse
+    }
+    if nodeOldSuitableAsTrafficProxyForTGB == false && nodeNewSuitableAsTrafficProxyForTGB == true {
+        return nodeNewReadyCondStatus != corev1.ConditionFalse
+    }
+    return false
+}
controllers/elbv2/eventhandlers/node_test.go

Lines changed: 170 additions & 0 deletions
@@ -0,0 +1,170 @@
+package eventhandlers
+
+import (
+    "github.com/stretchr/testify/assert"
+    corev1 "k8s.io/api/core/v1"
+    "testing"
+)
+
+func Test_enqueueRequestsForNodeEvent_shouldEnqueueTGBDueToNodeEvent(t *testing.T) {
+    type args struct {
+        nodeOldSuitableAsTrafficProxyForTGB bool
+        nodeOldReadyCondStatus              corev1.ConditionStatus
+        nodeNewSuitableAsTrafficProxyForTGB bool
+        nodeNewReadyCondStatus              corev1.ConditionStatus
+    }
+    tests := []struct {
+        name string
+        args args
+        want bool
+    }{
+        {
+            name: "suitable node changed from ready to notReady",
+            args: args{
+                nodeOldSuitableAsTrafficProxyForTGB: true,
+                nodeOldReadyCondStatus:              corev1.ConditionTrue,
+                nodeNewSuitableAsTrafficProxyForTGB: true,
+                nodeNewReadyCondStatus:              corev1.ConditionFalse,
+            },
+            want: true,
+        },
+        {
+            name: "suitable node changed from ready to unknown",
+            args: args{
+                nodeOldSuitableAsTrafficProxyForTGB: true,
+                nodeOldReadyCondStatus:              corev1.ConditionTrue,
+                nodeNewSuitableAsTrafficProxyForTGB: true,
+                nodeNewReadyCondStatus:              corev1.ConditionUnknown,
+            },
+            want: true,
+        },
+        {
+            name: "suitable node changed from notReady to ready",
+            args: args{
+                nodeOldSuitableAsTrafficProxyForTGB: true,
+                nodeOldReadyCondStatus:              corev1.ConditionFalse,
+                nodeNewSuitableAsTrafficProxyForTGB: true,
+                nodeNewReadyCondStatus:              corev1.ConditionTrue,
+            },
+            want: true,
+        },
+        {
+            name: "suitable node changed from notReady to unknown",
+            args: args{
+                nodeOldSuitableAsTrafficProxyForTGB: true,
+                nodeOldReadyCondStatus:              corev1.ConditionFalse,
+                nodeNewSuitableAsTrafficProxyForTGB: true,
+                nodeNewReadyCondStatus:              corev1.ConditionUnknown,
+            },
+            want: true,
+        },
+        {
+            name: "suitable node changed from unknown to ready",
+            args: args{
+                nodeOldSuitableAsTrafficProxyForTGB: true,
+                nodeOldReadyCondStatus:              corev1.ConditionUnknown,
+                nodeNewSuitableAsTrafficProxyForTGB: true,
+                nodeNewReadyCondStatus:              corev1.ConditionTrue,
+            },
+            want: true,
+        },
+        {
+            name: "suitable node changed from unknown to notReady",
+            args: args{
+                nodeOldSuitableAsTrafficProxyForTGB: true,
+                nodeOldReadyCondStatus:              corev1.ConditionUnknown,
+                nodeNewSuitableAsTrafficProxyForTGB: true,
+                nodeNewReadyCondStatus:              corev1.ConditionFalse,
+            },
+            want: true,
+        },
+        {
+            name: "suitable node remains ready",
+            args: args{
+                nodeOldSuitableAsTrafficProxyForTGB: true,
+                nodeOldReadyCondStatus:              corev1.ConditionTrue,
+                nodeNewSuitableAsTrafficProxyForTGB: true,
+                nodeNewReadyCondStatus:              corev1.ConditionTrue,
+            },
+            want: false,
+        },
+        {
+            name: "suitable node remains notReady",
+            args: args{
+                nodeOldSuitableAsTrafficProxyForTGB: true,
+                nodeOldReadyCondStatus:              corev1.ConditionFalse,
+                nodeNewSuitableAsTrafficProxyForTGB: true,
+                nodeNewReadyCondStatus:              corev1.ConditionFalse,
+            },
+            want: false,
+        },
+        {
+            name: "suitable node remains unknown",
+            args: args{
+                nodeOldSuitableAsTrafficProxyForTGB: true,
+                nodeOldReadyCondStatus:              corev1.ConditionUnknown,
+                nodeNewSuitableAsTrafficProxyForTGB: true,
+                nodeNewReadyCondStatus:              corev1.ConditionUnknown,
+            },
+            want: false,
+        },
+        {
+            name: "non-suitable node changed from ready to notReady",
+            args: args{
+                nodeOldSuitableAsTrafficProxyForTGB: false,
+                nodeOldReadyCondStatus:              corev1.ConditionTrue,
+                nodeNewSuitableAsTrafficProxyForTGB: false,
+                nodeNewReadyCondStatus:              corev1.ConditionFalse,
+            },
+            want: false,
+        },
+        {
+            name: "node became suitable while remains ready",
+            args: args{
+                nodeOldSuitableAsTrafficProxyForTGB: false,
+                nodeOldReadyCondStatus:              corev1.ConditionTrue,
+                nodeNewSuitableAsTrafficProxyForTGB: true,
+                nodeNewReadyCondStatus:              corev1.ConditionTrue,
+            },
+            want: true,
+        },
+        {
+            name: "node became suitable while remains notReady",
+            args: args{
+                nodeOldSuitableAsTrafficProxyForTGB: false,
+                nodeOldReadyCondStatus:              corev1.ConditionFalse,
+                nodeNewSuitableAsTrafficProxyForTGB: true,
+                nodeNewReadyCondStatus:              corev1.ConditionFalse,
+            },
+            want: false,
+        },
+        {
+            name: "node became non-suitable while remains ready",
+            args: args{
+                nodeOldSuitableAsTrafficProxyForTGB: true,
+                nodeOldReadyCondStatus:              corev1.ConditionTrue,
+                nodeNewSuitableAsTrafficProxyForTGB: false,
+                nodeNewReadyCondStatus:              corev1.ConditionTrue,
+            },
+            want: true,
+        },
+        {
+            name: "node became non-suitable while remains notReady",
+            args: args{
+                nodeOldSuitableAsTrafficProxyForTGB: true,
+                nodeOldReadyCondStatus:              corev1.ConditionFalse,
+                nodeNewSuitableAsTrafficProxyForTGB: false,
+                nodeNewReadyCondStatus:              corev1.ConditionFalse,
+            },
+            want: false,
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            h := &enqueueRequestsForNodeEvent{}
+            got := h.shouldEnqueueTGBDueToNodeEvent(tt.args.nodeOldSuitableAsTrafficProxyForTGB, tt.args.nodeOldReadyCondStatus,
+                tt.args.nodeNewSuitableAsTrafficProxyForTGB, tt.args.nodeNewReadyCondStatus)
+            assert.Equal(t, tt.want, got)
+        })
+    }
+}

controllers/ingress/eventhandlers/secret_events.go

Lines changed: 2 additions & 1 deletion
@@ -63,7 +63,8 @@ func (h *enqueueRequestsForSecretEvent) Delete(e event.DeleteEvent, _ workqueue.
 }
 
 func (h *enqueueRequestsForSecretEvent) Generic(e event.GenericEvent, _ workqueue.RateLimitingInterface) {
-    // we don't have any generic event for secrets.
+    secretObj := e.Object.(*corev1.Secret)
+    h.enqueueImpactedObjects(secretObj)
 }
 
 func (h *enqueueRequestsForSecretEvent) enqueueImpactedObjects(secret *corev1.Secret) {
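
With this change, Secret events reach the ingress handler as generic events over a channel rather than through a direct Secret watch (see the group_controller.go diff below). A minimal sketch of the producer side, assuming a chan event.GenericEvent (here called secretEventsChan) is wired into the controller via source.Channel; the helper name is hypothetical:

package eventhandlers

import (
    corev1 "k8s.io/api/core/v1"
    "sigs.k8s.io/controller-runtime/pkg/event"
)

// notifySecretChanged (hypothetical helper): sending a GenericEvent whose Object is
// the Secret causes the Generic handler above to re-enqueue every ingress group that
// references that Secret.
func notifySecretChanged(secretEventsChan chan<- event.GenericEvent, secret *corev1.Secret) {
    secretEventsChan <- event.GenericEvent{Object: secret}
}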

controllers/ingress/group_controller.go

Lines changed: 8 additions & 5 deletions
@@ -93,6 +93,7 @@ type groupReconciler struct {
     stackMarshaller deploy.StackMarshaller
     stackDeployer deploy.StackDeployer
     backendSGProvider networkingpkg.BackendSGProvider
+    secretsManager k8s.SecretsManager
 
     groupLoader ingress.GroupLoader
     groupFinalizerManager ingress.FinalizerManager
@@ -160,7 +161,7 @@ func (r *groupReconciler) reconcile(ctx context.Context, req ctrl.Request) error
 }
 
 func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingress.Group) (core.Stack, *elbv2model.LoadBalancer, error) {
-    stack, lb, err := r.modelBuilder.Build(ctx, ingGroup)
+    stack, lb, secrets, err := r.modelBuilder.Build(ctx, ingGroup)
     if err != nil {
         r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedBuildModel, fmt.Sprintf("Failed build model due to %v", err))
         return nil, nil, err
@@ -177,6 +178,7 @@ func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingr
         return nil, nil, err
     }
     r.logger.Info("successfully deployed model", "ingressGroup", ingGroup.ID)
+    r.secretsManager.MonitorSecrets(ingGroup.ID.String(), secrets)
     return stack, lb, err
 }
 
@@ -229,7 +231,7 @@ func (r *groupReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager
     if err := r.setupIndexes(ctx, mgr.GetFieldIndexer(), ingressClassResourceAvailable); err != nil {
         return err
     }
-    if err := r.setupWatches(ctx, c, ingressClassResourceAvailable); err != nil {
+    if err := r.setupWatches(ctx, c, ingressClassResourceAvailable, clientSet); err != nil {
         return err
     }
     return nil
@@ -276,9 +278,10 @@ func (r *groupReconciler) setupIndexes(ctx context.Context, fieldIndexer client.
     return nil
 }
 
-func (r *groupReconciler) setupWatches(_ context.Context, c controller.Controller, ingressClassResourceAvailable bool) error {
+func (r *groupReconciler) setupWatches(_ context.Context, c controller.Controller, ingressClassResourceAvailable bool, clientSet *kubernetes.Clientset) error {
     ingEventChan := make(chan event.GenericEvent)
     svcEventChan := make(chan event.GenericEvent)
+    secretEventsChan := make(chan event.GenericEvent)
     ingEventHandler := eventhandlers.NewEnqueueRequestsForIngressEvent(r.groupLoader, r.eventRecorder,
         r.logger.WithName("eventHandlers").WithName("ingress"))
     svcEventHandler := eventhandlers.NewEnqueueRequestsForServiceEvent(ingEventChan, r.k8sClient, r.eventRecorder,
@@ -297,10 +300,9 @@ func (r *groupReconciler) setupWatches(_ context.Context, c controller.Controlle
     if err := c.Watch(&source.Kind{Type: &corev1.Service{}}, svcEventHandler); err != nil {
         return err
     }
-    if err := c.Watch(&source.Kind{Type: &corev1.Secret{}}, secretEventHandler); err != nil {
+    if err := c.Watch(&source.Channel{Source: secretEventsChan}, secretEventHandler); err != nil {
        return err
     }
-
     if ingressClassResourceAvailable {
         ingClassEventChan := make(chan event.GenericEvent)
         ingClassParamsEventHandler := eventhandlers.NewEnqueueRequestsForIngressClassParamsEvent(ingClassEventChan, r.k8sClient, r.eventRecorder,
@@ -317,6 +319,7 @@ func (r *groupReconciler) setupWatches(_ context.Context, c controller.Controlle
         return err
     }
     }
+    r.secretsManager = k8s.NewSecretsManager(clientSet, secretEventsChan, ctrl.Log.WithName("secrets-manager"))
     return nil
 }
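
For orientation, the new k8s.SecretsManager used above tracks the Secrets each ingress group references and feeds change notifications into secretEventsChan. A rough sketch of the shape implied by this diff; the []types.NamespacedName parameter type is an assumption, not confirmed by the hunk:

package k8s

import "k8s.io/apimachinery/pkg/types"

// SecretsManager (sketch): MonitorSecrets replaces the set of Secrets watched on
// behalf of the given ingress group; when a monitored Secret changes, the manager
// emits a GenericEvent on the channel passed to NewSecretsManager.
type SecretsManager interface {
    MonitorSecrets(ingressGroupID string, secrets []types.NamespacedName)
}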

docs/deploy/subnet_discovery.md

Lines changed: 1 addition & 1 deletion
@@ -31,4 +31,4 @@ In version v2.1.1 and older, both the public and private subnets must be tagged
 
 `${cluster-name}` is the name of the kubernetes cluster
 
-The cluster tag is not required in v2.1.2 and newer releases.
+The cluster tag is not required in v2.1.2 and newer releases, unless a cluster tag for another cluster is present.
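
For reference (a recap of the surrounding documentation, not part of this diff), the cluster tag discussed here is an AWS tag on each subnet of the form:

kubernetes.io/cluster/${cluster-name}: owned | shared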

docs/guide/ingress/annotations.md

Lines changed: 1 addition & 1 deletion
@@ -244,7 +244,7 @@ Traffic Routing can be controlled with following annotations:
 !!!example
     - response-503: return fixed 503 response
     - redirect-to-eks: redirect to an external url
-    - forward-single-tg: forward to an single targetGroup [**simplified schema**]
+    - forward-single-tg: forward to a single targetGroup [**simplified schema**]
     - forward-multiple-tg: forward to multiple targetGroups with different weights and stickiness config [**advanced schema**]
 
 ```yaml
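# Not part of this diff: a sketch of the simplified forward-single-tg schema referenced
# above, expressed as an action annotation; the target group ARN is a placeholder.
alb.ingress.kubernetes.io/actions.forward-single-tg: >
  {"type":"forward","targetGroupARN": "arn-of-your-target-group"}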

helm/aws-load-balancer-controller/README.md

Lines changed: 1 addition & 0 deletions
@@ -234,3 +234,4 @@ The default values set by the application itself can be confirmed [here](https:/
 | `serviceMonitor.enabled` | Specifies whether a service monitor should be created, requires the ServiceMonitor CRD to be installed | `false` |
 | `serviceMonitor.additionalLabels` | Labels to add to the service account | `{}` |
 | `serviceMonitor.interval` | Prometheus scrape interval | `1m` |
+| `clusterSecretsPermissions.allowAllSecrets` | If `true`, controller has access to all secrets in the cluster. | `false` |
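
To opt back in to cluster-wide Secret access with this chart, set the new value in values.yaml (a minimal sketch; release name and other settings are up to the user):

# values.yaml (sketch): grants the controller get/list/watch on all Secrets,
# which the Secret watch wiring added in group_controller.go relies on.
clusterSecretsPermissions:
  allowAllSecrets: true

With this value set, the Secret rule is rendered into the ClusterRole, as shown in the rbac.yaml template change below.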

helm/aws-load-balancer-controller/templates/rbac.yaml

Lines changed: 6 additions & 1 deletion
@@ -57,8 +57,13 @@ rules:
   resources: [services, ingresses]
   verbs: [get, list, patch, update, watch]
 - apiGroups: [""]
-  resources: [nodes, secrets, namespaces, endpoints]
+  resources: [nodes, namespaces, endpoints]
   verbs: [get, list, watch]
+{{- if .Values.clusterSecretsPermissions.allowAllSecrets }}
+- apiGroups: [""]
+  resources: [secrets]
+  verbs: [get, list, watch]
+{{- end }}
 - apiGroups: ["elbv2.k8s.aws", "", "extensions", "networking.k8s.io"]
   resources: [targetgroupbindings/status, pods/status, services/status, ingresses/status]
   verbs: [update, patch]
