Skip to content

Commit c412807

Browse files
authored
pkg/helm: migrate to secret storage backend in namespace of CR (#1102)
* pkg/helm: migrate to secret storage backend in namespace of CR * CHANGELOG.md: added change for PR #1102
1 parent a38633d commit c412807

File tree

6 files changed

+62
-43
lines changed

6 files changed

+62
-43
lines changed

CHANGELOG.md

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,9 @@
44

55
### Changed
66

7+
- Updated the helm-operator to store release state in Kubernetes secrets in the same namespace as the custom resource that defines the release. ([#1102](https://github.com/operator-framework/operator-sdk/pull/1102))
8+
- **WARNING**: Users with active CRs and releases who are upgrading their helm-based operator should not skip this version. Future versions will not seamlessly transition release state to the persistent backend, and will instead uninstall and reinstall all managed releases.
9+
710
### Deprecated
811

912
### Removed

Gopkg.lock

Lines changed: 1 addition & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

pkg/helm/controller/reconcile.go

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,12 @@ func (r HelmOperatorReconciler) Reconcile(request reconcile.Request) (reconcile.
7575
return reconcile.Result{}, err
7676
}
7777

78-
manager := r.ManagerFactory.NewManager(o)
78+
manager, err := r.ManagerFactory.NewManager(o)
79+
if err != nil {
80+
log.Error(err, "Failed to get release manager")
81+
return reconcile.Result{}, err
82+
}
83+
7984
status := types.StatusFor(o)
8085
log = log.WithValues("release", manager.ReleaseName())
8186

pkg/helm/release/manager.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,10 @@ func (m manager) IsUpdateRequired() bool {
9494
// Sync ensures the Helm storage backend is in sync with the status of the
9595
// custom resource.
9696
func (m *manager) Sync(ctx context.Context) error {
97+
// TODO: We're now persisting releases as secrets. To support seamless upgrades, we
98+
// need to sync the release status from the CR to the persistent storage backend.
99+
// Once we release the storage backend migration, this function (and comment)
100+
// can be removed.
97101
if err := m.syncReleaseStatus(*m.status); err != nil {
98102
return fmt.Errorf("failed to sync release status to storage backend: %s", err)
99103
}

pkg/helm/release/manager_factory.go

Lines changed: 47 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -20,17 +20,20 @@ import (
2020

2121
"github.com/martinlindhe/base36"
2222
"github.com/pborman/uuid"
23-
2423
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2524
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
2625
apitypes "k8s.io/apimachinery/pkg/types"
2726
clientset "k8s.io/client-go/kubernetes"
27+
"k8s.io/client-go/kubernetes/typed/core/v1"
2828
helmengine "k8s.io/helm/pkg/engine"
2929
"k8s.io/helm/pkg/kube"
3030
"k8s.io/helm/pkg/storage"
31+
"k8s.io/helm/pkg/storage/driver"
3132
"k8s.io/helm/pkg/tiller"
3233
"k8s.io/helm/pkg/tiller/environment"
34+
crmanager "sigs.k8s.io/controller-runtime/pkg/manager"
3335

36+
"github.com/operator-framework/operator-sdk/pkg/helm/client"
3437
"github.com/operator-framework/operator-sdk/pkg/helm/engine"
3538
"github.com/operator-framework/operator-sdk/pkg/helm/internal/types"
3639
)
@@ -40,43 +43,51 @@ import (
4043
// improves decoupling between reconciliation logic and the Helm backend
4144
// components used to manage releases.
4245
type ManagerFactory interface {
43-
NewManager(r *unstructured.Unstructured) Manager
46+
NewManager(r *unstructured.Unstructured) (Manager, error)
4447
}
4548

4649
type managerFactory struct {
47-
storageBackend *storage.Storage
48-
tillerKubeClient *kube.Client
49-
chartDir string
50+
mgr crmanager.Manager
51+
chartDir string
5052
}
5153

5254
// NewManagerFactory returns a new Helm manager factory capable of installing and uninstalling releases.
53-
func NewManagerFactory(storageBackend *storage.Storage, tillerKubeClient *kube.Client, chartDir string) ManagerFactory {
54-
return &managerFactory{storageBackend, tillerKubeClient, chartDir}
55-
}
56-
57-
func (f managerFactory) NewManager(r *unstructured.Unstructured) Manager {
58-
return f.newManagerForCR(r)
55+
func NewManagerFactory(mgr crmanager.Manager, chartDir string) ManagerFactory {
56+
return &managerFactory{mgr, chartDir}
5957
}
6058

61-
func (f managerFactory) newManagerForCR(r *unstructured.Unstructured) Manager {
59+
func (f managerFactory) NewManager(cr *unstructured.Unstructured) (Manager, error) {
60+
clientv1, err := v1.NewForConfig(f.mgr.GetConfig())
61+
if err != nil {
62+
return nil, fmt.Errorf("failed to get core/v1 client: %s", err)
63+
}
64+
storageBackend := storage.Init(driver.NewSecrets(clientv1.Secrets(cr.GetNamespace())))
65+
tillerKubeClient, err := client.NewFromManager(f.mgr)
66+
if err != nil {
67+
return nil, fmt.Errorf("failed to get client from manager: %s", err)
68+
}
69+
releaseServer, err := getReleaseServer(cr, storageBackend, tillerKubeClient)
70+
if err != nil {
71+
return nil, fmt.Errorf("failed to get helm release server: %s", err)
72+
}
6273
return &manager{
63-
storageBackend: f.storageBackend,
64-
tillerKubeClient: f.tillerKubeClient,
74+
storageBackend: storageBackend,
75+
tillerKubeClient: tillerKubeClient,
6576
chartDir: f.chartDir,
6677

67-
tiller: f.tillerRendererForCR(r),
68-
releaseName: getReleaseName(r),
69-
namespace: r.GetNamespace(),
78+
tiller: releaseServer,
79+
releaseName: getReleaseName(cr),
80+
namespace: cr.GetNamespace(),
7081

71-
spec: r.Object["spec"],
72-
status: types.StatusFor(r),
73-
}
82+
spec: cr.Object["spec"],
83+
status: types.StatusFor(cr),
84+
}, nil
7485
}
7586

76-
// tillerRendererForCR creates a ReleaseServer configured with a rendering engine that adds ownerrefs to rendered assets
87+
// getReleaseServer creates a ReleaseServer configured with a rendering engine that adds ownerrefs to rendered assets
7788
// based on the CR.
78-
func (f managerFactory) tillerRendererForCR(r *unstructured.Unstructured) *tiller.ReleaseServer {
79-
controllerRef := metav1.NewControllerRef(r, r.GroupVersionKind())
89+
func getReleaseServer(cr *unstructured.Unstructured, storageBackend *storage.Storage, tillerKubeClient *kube.Client) (*tiller.ReleaseServer, error) {
90+
controllerRef := metav1.NewControllerRef(cr, cr.GroupVersionKind())
8091
ownerRefs := []metav1.OwnerReference{
8192
*controllerRef,
8293
}
@@ -87,17 +98,23 @@ func (f managerFactory) tillerRendererForCR(r *unstructured.Unstructured) *tille
8798
}
8899
env := &environment.Environment{
89100
EngineYard: ey,
90-
Releases: f.storageBackend,
91-
KubeClient: f.tillerKubeClient,
101+
Releases: storageBackend,
102+
KubeClient: tillerKubeClient,
103+
}
104+
kubeconfig, err := tillerKubeClient.ToRESTConfig()
105+
if err != nil {
106+
return nil, err
107+
}
108+
cs, err := clientset.NewForConfig(kubeconfig)
109+
if err != nil {
110+
return nil, err
92111
}
93-
kubeconfig, _ := f.tillerKubeClient.ToRESTConfig()
94-
cs := clientset.NewForConfigOrDie(kubeconfig)
95112

96-
return tiller.NewReleaseServer(env, cs, false)
113+
return tiller.NewReleaseServer(env, cs, false), nil
97114
}
98115

99-
func getReleaseName(r *unstructured.Unstructured) string {
100-
return fmt.Sprintf("%s-%s", r.GetName(), shortenUID(r.GetUID()))
116+
func getReleaseName(cr *unstructured.Unstructured) string {
117+
return fmt.Sprintf("%s-%s", cr.GetName(), shortenUID(cr.GetUID()))
101118
}
102119

103120
func shortenUID(uid apitypes.UID) string {

pkg/helm/run.go

Lines changed: 1 addition & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@ import (
2020
"os"
2121
"runtime"
2222

23-
"github.com/operator-framework/operator-sdk/pkg/helm/client"
2423
"github.com/operator-framework/operator-sdk/pkg/helm/controller"
2524
hoflags "github.com/operator-framework/operator-sdk/pkg/helm/flags"
2625
"github.com/operator-framework/operator-sdk/pkg/helm/release"
@@ -30,8 +29,6 @@ import (
3029
sdkVersion "github.com/operator-framework/operator-sdk/version"
3130

3231
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
33-
"k8s.io/helm/pkg/storage"
34-
"k8s.io/helm/pkg/storage/driver"
3532
"sigs.k8s.io/controller-runtime/pkg/client/config"
3633
"sigs.k8s.io/controller-runtime/pkg/manager"
3734
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
@@ -77,14 +74,6 @@ func Run(flags *hoflags.HelmOperatorFlags) error {
7774
return err
7875
}
7976

80-
// Create Tiller's storage backend and kubernetes client
81-
storageBackend := storage.Init(driver.NewMemory())
82-
tillerKubeClient, err := client.NewFromManager(mgr)
83-
if err != nil {
84-
log.Error(err, "Failed to create new Tiller client.")
85-
return err
86-
}
87-
8877
watches, err := watches.Load(flags.WatchesFile)
8978
if err != nil {
9079
log.Error(err, "Failed to create new manager factories.")
@@ -96,7 +85,7 @@ func Run(flags *hoflags.HelmOperatorFlags) error {
9685
err := controller.Add(mgr, controller.WatchOptions{
9786
Namespace: namespace,
9887
GVK: w.GroupVersionKind,
99-
ManagerFactory: release.NewManagerFactory(storageBackend, tillerKubeClient, w.ChartDir),
88+
ManagerFactory: release.NewManagerFactory(mgr, w.ChartDir),
10089
ReconcilePeriod: flags.ReconcilePeriod,
10190
WatchDependentResources: w.WatchDependentResources,
10291
})

0 commit comments

Comments
 (0)