@@ -21,14 +21,12 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/go-log/log/info"
 	"github.com/golang/glog"
 
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	errorutil "k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/record"
 
@@ -41,8 +39,6 @@ import (
 
 	awsclient "sigs.k8s.io/cluster-api-provider-aws/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-
-	kubedrain "github.com/openshift/kubernetes-drain"
 )
 
 const (
@@ -55,8 +51,6 @@ const (
 
 	// MachineCreationFailed indicates that machine creation failed
 	MachineCreationFailed = "MachineCreationFailed"
-	// ExcludeNodeDrainingAnnotation annotation explicitly skips node draining if set
-	ExcludeNodeDrainingAnnotation = "machine.openshift.io/exclude-node-draining"
 )
 
 // Actuator is the AWS-specific actuator for the Cluster API machine controller
@@ -255,50 +249,6 @@ func (gl *glogLogger) Logf(format string, v ...interface{}) {
 
 // DeleteMachine deletes an AWS instance
 func (a *Actuator) DeleteMachine(cluster *machinev1.Cluster, machine *machinev1.Machine) error {
-	// Drain node before deleting
-	// If a machine is not linked to a node, just delete the machine. Since a node
-	// can be unlinked from a machine when the node goes NotReady and is removed
-	// by cloud controller manager. In that case some machines would never get
-	// deleted without a manual intervention.
-	if _, exists := machine.ObjectMeta.Annotations[ExcludeNodeDrainingAnnotation]; !exists && machine.Status.NodeRef != nil {
-		glog.Infof("Draining node before delete")
-		if a.config == nil {
-			err := fmt.Errorf("missing client config, unable to build kube client")
-			glog.Error(err)
-			return err
-		}
-		kubeClient, err := kubernetes.NewForConfig(a.config)
-		if err != nil {
-			return fmt.Errorf("unable to build kube client: %v", err)
-		}
-		node, err := kubeClient.CoreV1().Nodes().Get(machine.Status.NodeRef.Name, metav1.GetOptions{})
-		if err != nil {
-			return fmt.Errorf("unable to get node %q: %v", machine.Status.NodeRef.Name, err)
-		}
-
-		if err := kubedrain.Drain(
-			kubeClient,
-			[]*corev1.Node{node},
-			&kubedrain.DrainOptions{
-				Force:              true,
-				IgnoreDaemonsets:   true,
-				DeleteLocalData:    true,
-				GracePeriodSeconds: -1,
-				Logger:             info.New(glog.V(0)),
-				// If a pod is not evicted in 20 second, retry the eviction next time the
-				// machine gets reconciled again (to allow other machines to be reconciled)
-				Timeout: 20 * time.Second,
-			},
-		); err != nil {
-			// Machine still tries to terminate after drain failure
-			glog.Warningf("drain failed for machine %q: %v", machine.Name, err)
-			return &clustererror.RequeueAfterError{RequeueAfter: requeueAfterSeconds * time.Second}
-		}
-
-		glog.Infof("drain successful for machine %q", machine.Name)
-		a.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Deleted", "Node %q drained", node.Name)
-	}
-
 	machineProviderConfig, err := providerConfigFromMachine(a.client, machine, a.codec)
 	if err != nil {
 		return a.handleMachineError(machine, apierrors.InvalidMachineConfiguration("error decoding MachineProviderConfig: %v", err), deleteEventAction)