✨ Allow removing individual informers from the cache (#935) #936

Closed

2 changes: 1 addition & 1 deletion pkg/cache/cache.go
@@ -126,7 +126,7 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) {
// Construct a new Mapper if unset
if opts.Mapper == nil {
var err error
-		opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config)
+		opts.Mapper, err = apiutil.NewDynamicRESTMapper(config)

Member commented:
This is a good change but should be its own PR as it doesn't really have anything to do with this PR


Author replied:
Fair enough, I will move this into a separate PR. 👍🏻

if err != nil {
log.WithName("setup").Error(err, "Failed to get API Group-Resources")
return opts, fmt.Errorf("could not create RESTMapper from config")
30 changes: 30 additions & 0 deletions pkg/cache/informer_cache.go
@@ -22,6 +22,7 @@ import (
"reflect"
"strings"

apierrors "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
@@ -159,6 +160,24 @@ func (ip *informerCache) GetInformer(ctx context.Context, obj runtime.Object) (I
return i.Informer, err
}

// GetInformerNonBlocking returns the informer for the obj without waiting for its cache to sync.
func (ip *informerCache) GetInformerNonBlocking(obj runtime.Object) (Informer, error) {
gvk, err := apiutil.GVKForObject(obj, ip.Scheme)
if err != nil {
return nil, err
}

// Use a cancelled context to signal non-blocking
ctx, cancel := context.WithCancel(context.Background())
cancel()

_, i, err := ip.InformersMap.Get(ctx, gvk, obj)
if err != nil && !apierrors.IsTimeout(err) {
return nil, err
}
return i.Informer, nil
}
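
Note: because the context passed to InformersMap.Get is already cancelled, the sync wait returns immediately with a timeout error, which this method deliberately swallows. As a minimal usage sketch — the helper below is invented for illustration and assumes code living in this cache package, with corev1.Pod registered in the scheme:

import (
	corev1 "k8s.io/api/core/v1"
	toolscache "k8s.io/client-go/tools/cache"
)

// watchPodsNonBlocking registers an event handler without waiting for the
// initial list to finish; events arrive as the cache syncs in the background.
func watchPodsNonBlocking(c *informerCache) error {
	informer, err := c.GetInformerNonBlocking(&corev1.Pod{})
	if err != nil {
		return err
	}
	informer.AddEventHandler(toolscache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			// Invoked once per Pod as the informer lists and watches.
		},
	})
	return nil
}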

// NeedLeaderElection implements the LeaderElectionRunnable interface
// to indicate that this can be started without requiring the leader lock
func (ip *informerCache) NeedLeaderElection() bool {
@@ -216,3 +235,14 @@ func indexByField(indexer Informer, field string, extractor client.IndexerFunc)

return indexer.AddIndexers(cache.Indexers{internal.FieldIndexName(field): indexFunc})
}

// Remove removes the informer for the given object from the cache and stops it if it exists.
func (ip *informerCache) Remove(obj runtime.Object) error {
gvk, err := apiutil.GVKForObject(obj, ip.Scheme)
if err != nil {
return err
}

ip.InformersMap.Remove(gvk, obj)
return nil
}
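
As a usage sketch: an operator that watches dynamically installed CRDs could drop the informer once a CRD is deleted, so the cache stops listing and watching a type the API server no longer serves. The Widget GVK and helper below are invented for illustration:

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// removeWidgetInformer builds an unstructured carrier for the GVK and asks
// the cache to remove and stop the corresponding informer.
func removeWidgetInformer(ip *informerCache) error {
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{
		Group:   "example.com",
		Version: "v1",
		Kind:    "Widget",
	})
	return ip.Remove(u)
}
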
14 changes: 14 additions & 0 deletions pkg/cache/internal/deleg_map.go
@@ -92,6 +92,20 @@ func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj
return m.structured.Get(ctx, gvk, obj)
}

// Remove will remove an Informer from the InformersMap and stop it if it exists.
func (m *InformersMap) Remove(gvk schema.GroupVersionKind, obj runtime.Object) {
_, isUnstructured := obj.(*unstructured.Unstructured)
_, isUnstructuredList := obj.(*unstructured.UnstructuredList)
isUnstructured = isUnstructured || isUnstructuredList

switch {
case isUnstructured:
m.unstructured.Remove(gvk)
default:
m.structured.Remove(gvk)
}
}

// newStructuredInformersMap creates a new InformersMap for structured objects.
func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createStructuredListWatch)
42 changes: 37 additions & 5 deletions pkg/cache/internal/informers_map.go
@@ -70,6 +70,9 @@ type MapEntry struct {

// CacheReader wraps Informer and implements the CacheReader interface for a single type
Reader CacheReader

// stop can be used to stop this individual informer without stopping the entire specificInformersMap.
stop chan struct{}
}

// specificInformersMap creates and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs.
@@ -121,6 +124,17 @@ type specificInformersMap struct {
namespace string
}

// Start starts the informer managed by a MapEntry.
// Blocks until the informer stops. The informer can be stopped
// either individually (via the entry's stop channel) or globally
// via the provided stop argument.
func (e *MapEntry) Start(stop <-chan struct{}) {
// Stop on either the whole map stopping or just this informer being removed.
internalStop, cancel := anyOf(stop, e.stop)

Contributor commented:
#1116 should help here

defer cancel()
e.Informer.Run(internalStop)
}

// Start calls Run on each of the informers and sets started to true. Blocks on the stop channel.
// It doesn't return start because it can't return an error, and it's not a runnable directly.
func (ip *specificInformersMap) Start(stop <-chan struct{}) {
@@ -132,8 +146,8 @@ func (ip *specificInformersMap) Start(stop <-chan struct{}) {
ip.stop = stop

// Start each informer
-	for _, informer := range ip.informersByGVK {
-		go informer.Informer.Run(stop)
+	for _, entry := range ip.informersByGVK {
+		go entry.Start(stop)
}

// Set started to true so we immediately start any informers added later.
@@ -183,8 +197,12 @@ func (ip *specificInformersMap) Get(ctx context.Context, gvk schema.GroupVersion

if started && !i.Informer.HasSynced() {
// Wait for it to sync before returning the Informer so that folks don't read from a stale cache.
-		if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) {
-			return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0)
+		// Cancel for context, informer stopping, or entire map stopping.
+		syncStop, cancel := mergeChan(ctx.Done(), i.stop, ip.stop)
+		defer cancel()
+		if !cache.WaitForCacheSync(syncStop, i.Informer.HasSynced) {
+			// Return entry even on timeout - caller may have intended a non-blocking fetch.
+			return started, i, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0)
}
}

@@ -214,18 +232,32 @@ func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, ob
i := &MapEntry{
Informer: ni,
Reader: CacheReader{indexer: ni.GetIndexer(), groupVersionKind: gvk},
stop: make(chan struct{}),
}
ip.informersByGVK[gvk] = i

// Start the Informer if needed
// TODO(seans): write thorough tests and document what happens here - can you add indexers?
// can you add eventhandlers?
if ip.started {
-		go i.Informer.Run(ip.stop)
+		go i.Start(ip.stop)
}
return i, ip.started, nil
}

// Remove removes an informer entry and stops it if it was running.
func (ip *specificInformersMap) Remove(gvk schema.GroupVersionKind) {
ip.mu.Lock()
defer ip.mu.Unlock()

entry, ok := ip.informersByGVK[gvk]
if !ok {
return
}
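// Closing the stop channel terminates MapEntry.Start (and with it the
// underlying Informer.Run) and unblocks any Get waiting in WaitForCacheSync.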
close(entry.stop)
delete(ip.informersByGVK, gvk)
}

// createStructuredListWatch returns a new ListWatch object that can be used to create a SharedIndexInformer.
func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
// Kubernetes APIs work against Resources, not GroupVersionKinds. Map the
14 changes: 14 additions & 0 deletions pkg/cache/internal/internal_suite_test.go
@@ -0,0 +1,14 @@
package internal

import (
"testing"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
)

func TestCacheInternal(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t, "Cache Internal Suite", []Reporter{printer.NewlineReporter{}})
}
79 changes: 79 additions & 0 deletions pkg/cache/internal/sync.go
@@ -0,0 +1,79 @@
package internal

import (
"context"
"sync"
)

// anyOf returns a "done" channel that is closed when any of its input channels
// is closed or when the returned cancel function is called, whichever comes first.
//
// The cancel function should always be called by the caller to ensure
// resources are properly released.
func anyOf(ch ...<-chan struct{}) (<-chan struct{}, context.CancelFunc) {
var once sync.Once
cancel := make(chan struct{})
cancelFunc := func() {
once.Do(func() {
close(cancel)
})
}
return anyInternal(append(ch, cancel)...), cancelFunc
}

func anyInternal(ch ...<-chan struct{}) <-chan struct{} {
switch len(ch) {
case 0:
return nil
case 1:
return ch[0]
}

done := make(chan struct{})
go func() {
defer close(done)

switch len(ch) {
case 2:
// This case saves a recursion + goroutine when there are exactly 2 channels.
select {
case <-ch[0]:
case <-ch[1]:
}
default:
// >=3 channels to merge
select {
case <-ch[0]:
case <-ch[1]:
case <-ch[2]:
case <-anyInternal(append(ch[3:], done)...):
}
}
}()

return done
}

// mergeChan returns a channel that is closed when any of the input channels is signaled.
// The caller must call the returned CancelFunc to ensure no resources are leaked.
func mergeChan(a, b, c <-chan struct{}) (<-chan struct{}, context.CancelFunc) {
var once sync.Once
out := make(chan struct{})
cancel := make(chan struct{})
cancelFunc := func() {
once.Do(func() {
close(cancel)
})
}
go func() {
defer close(out)
select {
case <-a:
case <-b:
case <-c:
case <-cancel:
}
}()

return out, cancelFunc
}
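
To make the stop-channel composition concrete, here is a small self-contained sketch; the function and channel names are invented, and it mirrors how MapEntry.Start merges its two stop signals with anyOf:

// exampleAnyOf merges a map-wide stop channel with a per-informer one.
func exampleAnyOf() {
	globalStop := make(chan struct{})   // stops every informer
	informerStop := make(chan struct{}) // stops just this informer

	done, cancel := anyOf(globalStop, informerStop)
	defer cancel() // releases the merging goroutine even if no input ever fires

	close(informerStop) // closing either input closes done
	<-done              // returns promptly; globalStop is still open
}
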
71 changes: 71 additions & 0 deletions pkg/cache/internal/sync_test.go
@@ -0,0 +1,71 @@
package internal

import (
"context"
"fmt"
"time"

. "github.com/onsi/ginkgo"
)

var _ = Describe("anyOf", func() {
// Generate contexts for different number of input channels
for n := 0; n < 4; n++ {
n := n
Context(fmt.Sprintf("with %d channels", n), func() {
var (
channels []chan struct{}
done <-chan struct{}
cancel context.CancelFunc
)
BeforeEach(func() {
channels = make([]chan struct{}, n)
in := make([]<-chan struct{}, n)
for i := 0; i < n; i++ {
ch := make(chan struct{})
channels[i] = ch
in[i] = ch
}
done, cancel = anyOf(in...)
})
AfterEach(func() {
cancel()
})

It("isn't closed initially", func() {
select {
case <-done:
Fail("done was closed before cancel")
case <-time.After(5 * time.Millisecond):
// Ok.
}
})

// Verify that done is closed when we call cancel explicitly.
It("closes when cancelled", func() {
cancel()
select {
case <-done:
// Ok.
case <-time.After(5 * time.Millisecond):
Fail("timed out waiting for cancel")
}
})

// Generate test cases for closing each individual channel.
// Verify that done is closed in response.
for i := 0; i < n; i++ {
i := i
It(fmt.Sprintf("closes when channel %d is closed", i), func() {
close(channels[i])
select {
case <-done:
// Ok.
case <-time.After(5 * time.Millisecond):
Fail("timed out waiting for cancel")
}
})
}
})
}
})