Skip to content

pkg/leader: allow setting the pod name explicitly. #617

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Oct 23, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 9 additions & 1 deletion pkg/leader/doc.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,14 @@ The lock record in this case is a ConfigMap whose OwnerReference is set to the
Pod that is the leader. When the leader is destroyed, the ConfigMap gets
garbage-collected, enabling a different candidate Pod to become the leader.

Leader for Life requires that all candidate Pods be in the same Namespace.
Leader for Life requires that all candidate Pods be in the same Namespace. It
uses the downward API to determine the pod name, as the hostname is not reliable.
You should run it configured with:

env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
*/
package leader
17 changes: 11 additions & 6 deletions pkg/leader/leader.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ package leader
import (
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
Expand All @@ -39,6 +40,8 @@ var errNoNS = errors.New("namespace not found for current environment")
// attempts to become the leader.
const maxBackoffInterval = time.Second * 16

// PodNameEnv is the environment variable that holds the name of the pod in
// which this process runs; it is expected to be populated via the Kubernetes
// downward API (fieldRef: metadata.name).
const PodNameEnv = "POD_NAME"

// Become ensures that the current pod is the leader within its namespace. If
// run outside a cluster, it will skip leader election and return nil. It
// continuously tries to create a ConfigMap with the provided name and the
Expand Down Expand Up @@ -156,12 +159,14 @@ func myNS() (string, error) {

// myOwnerRef returns an OwnerReference that corresponds to the pod in which
// this code is currently running.
// It expects the environment variable POD_NAME to be set by the downward API.
func myOwnerRef(ctx context.Context, client crclient.Client, ns string) (*metav1.OwnerReference, error) {
hostname, err := os.Hostname()
if err != nil {
return nil, err
podName := os.Getenv(PodNameEnv)
if podName == "" {
return nil, fmt.Errorf("required env %s not set, please configure downward API", PodNameEnv)
}
logrus.Debugf("found hostname: %s", hostname)

logrus.Debugf("found podname: %s", podName)

myPod := &corev1.Pod{
TypeMeta: metav1.TypeMeta{
Expand All @@ -170,8 +175,8 @@ func myOwnerRef(ctx context.Context, client crclient.Client, ns string) (*metav1
},
}

key := crclient.ObjectKey{Namespace: ns, Name: hostname}
err = client.Get(ctx, key, myPod)
key := crclient.ObjectKey{Namespace: ns, Name: podName}
err := client.Get(ctx, key, myPod)
if err != nil {
logrus.Errorf("failed to get pod: %v", err)
return nil, err
Expand Down
4 changes: 4 additions & 0 deletions pkg/scaffold/operator.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,10 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: "{{.ProjectName}}"
`
4 changes: 4 additions & 0 deletions pkg/scaffold/operator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,10 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: "app-operator"
`