Mirror of https://github.com/vmware-tanzu/pinniped.git, synced 2026-02-24 23:04:11 +00:00
Compare commits
46 Commits
| SHA1 |
|---|
| 454b792afb |
| cb4085bfd9 |
| 9b0dc92025 |
| 7859a7b5c2 |
| bdcf468e52 |
| efaca05999 |
| 316e6171d4 |
| 04544b3d3c |
| 85102b0118 |
| 55de160551 |
| cec9f3c4d7 |
| 16f562e81c |
| 92ccc0ec84 |
| 74175f2518 |
| 0a1ee9e37c |
| 05f5bac405 |
| 0195894a50 |
| 27c1d2144a |
| 88aba645b8 |
| 402c213183 |
| 17acc7caa6 |
| 6b7a230ca5 |
| c7a8c429ed |
| f0a1555aca |
| ccd338fa50 |
| 4e7214c6b5 |
| 2297ee4b81 |
| 85daec4748 |
| cf014656af |
| b3b3c2303f |
| 0ff66c718b |
| 1bb8a43e04 |
| 655bbce42a |
| 9258745ec7 |
| fcffab9a4c |
| 92f7f12bab |
| 7c40185676 |
| abf19f649d |
| 0a2a716796 |
| a27e398923 |
| ba1470ea9d |
| 23fb84029b |
| 4cb0152ea1 |
| 42af8acd1e |
| df014dadc3 |
| bb657e7432 |
@@ -3,7 +3,7 @@
# Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
FROM golang:1.17.0 as build-env
FROM golang:1.17.1 as build-env
WORKDIR /work
COPY . .
@@ -24,7 +24,7 @@ RUN \
ln -s /usr/local/bin/pinniped-server /usr/local/bin/local-user-authenticator
# Use a distroless runtime image with CA certificates, timezone data, and not much else.
FROM gcr.io/distroless/static:nonroot@sha256:c9f9b040044cc23e1088772814532d90adadfa1b86dcba17d07cb567db18dc4e
FROM gcr.io/distroless/static:nonroot@sha256:be5d77c62dbe7fedfb0a4e5ec2f91078080800ab1f18358e5f31fcc8faa023c4
# Copy the server binary from the build-env stage.
COPY --from=build-env /usr/local/bin /usr/local/bin
@@ -5,7 +5,6 @@ This is the current list of maintainers for the Pinniped project.
| Maintainer | GitHub ID | Affiliation |
| --------------- | --------- | ----------- |
| Margo Crawford | [margocrawf](https://github.com/margocrawf) | [VMware](https://www.github.com/vmware/) |
| Matt Moyer | [mattmoyer](https://github.com/mattmoyer) | [VMware](https://www.github.com/vmware/) |
| Mo Khan | [enj](https://github.com/enj) | [VMware](https://www.github.com/vmware/) |
| Anjali Telang | [anjaltelang](https://github.com/anjaltelang) | [VMware](https://www.github.com/vmware/) |
| Ryan Richard | [cfryanr](https://github.com/cfryanr) | [VMware](https://www.github.com/vmware/) |
@@ -14,11 +13,12 @@ This is the current list of maintainers for the Pinniped project.
* Andrew Keesler, [ankeesler](https://github.com/ankeesler)
* Pablo Schuhmacher, [pabloschuhmacher](https://github.com/pabloschuhmacher)
* Matt Moyer, [mattmoyer](https://github.com/mattmoyer)
## Pinniped Contributors & Stakeholders
| Feature Area | Lead |
| ----------------------------- | :---------------------: |
| Technical Lead | Matt Moyer (mattmoyer) |
| Technical Lead | Mo Khan (enj) |
| Product Management | Anjali Telang (anjaltelang) |
| Community Management | Nanci Lancaster (microwavables) |
ROADMAP.md
@@ -33,17 +33,16 @@ The following table includes the current roadmap for Pinniped. If you have any q
Last Updated: July 2021
Theme|Description|Timeline|
Last Updated: Sept 2021
|Theme|Description|Timeline|
|--|--|--|
|Non-Interactive Password based OIDC logins |Support for non-interactive OIDC Logins via CLI using Password Grant |Aug 2021|
|Active Directory Support|Extends upstream IDP protocols|Aug 2021|
|Multiple IDP support|Support multiple IDPs configured on a single Supervisor|Sept 2021|
|Wider Concierge cluster support|Support for more cluster types in the Concierge|Sept 2021|
|Improving Security Posture|Supervisor token refresh fails when the upstream refresh token no longer works|Sept 2021|
|Wider Concierge cluster support|Support for OpenShift cluster types in the Concierge|Sept 2021|
|Multiple IDP support|Support multiple IDPs configured on a single Supervisor|Exploring/Ongoing|
|Identity transforms|Support prefixing, filtering, or performing coarse-grained checks on upstream users and groups|Exploring/Ongoing|
|CLI SSO|Support Kerberos based authentication on CLI |Exploring/Ongoing|
|Extended IDP support|Support more types of identity providers on the Supervisor|Exploring/Ongoing|
|Improved Documentation|Reorganizing and improving Pinniped docs; new how-to guides and tutorials|Exploring/Ongoing|
|Improving Security Posture|Offer the best security posture for Kubernetes cluster authentication|Exploring/Ongoing|
|Improve our CI/CD systems|Upgrade tests; make Kind more efficient and reliable for CI ; Windows tests; performance tests; scale tests; soak tests|Exploring/Ongoing|
|CLI Improvements|Improving CLI UX for setting up Supervisor IDPs|Exploring/Ongoing|
|Telemetry|Adding some useful phone home metrics as well as some vanity metrics|Exploring/Ongoing|
@@ -3,7 +3,8 @@
#@ load("@ytt:data", "data")
#@ load("@ytt:json", "json")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel", "pinnipedDevAPIGroupWithPrefix")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel", "pinnipedDevAPIGroupWithPrefix")
#@ load("@ytt:template", "template")
#@ if not data.values.into_namespace:
---
@@ -108,15 +109,20 @@ metadata:
spec:
replicas: #@ data.values.replicas
selector:
#! In hindsight, this should have been deploymentPodLabel(), but this field is immutable so changing it would break upgrades.
matchLabels: #@ defaultLabel()
template:
metadata:
labels: #@ defaultLabel()
labels:
#! This has always included defaultLabel(), which is used by this Deployment's selector.
_: #@ template.replace(defaultLabel())
#! More recently added the more unique deploymentPodLabel() so Services can select these Pods more specifically
#! without accidentally selecting any other Deployment's Pods, especially the kube cert agent Deployment's Pods.
_: #@ template.replace(deploymentPodLabel())
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
spec:
securityContext:
readOnlyRootFilesystem: true
runAsUser: #@ data.values.run_as_user
runAsGroup: #@ data.values.run_as_group
serviceAccountName: #@ defaultResourceName()
@@ -132,6 +138,8 @@ spec:
image: #@ data.values.image_repo + ":" + data.values.image_tag
#@ end
imagePullPolicy: IfNotPresent
securityContext:
readOnlyRootFilesystem: true
resources:
requests:
cpu: "100m"
@@ -148,10 +156,13 @@ spec:
mountPath: /tmp
- name: config-volume
mountPath: /etc/config
readOnly: true
- name: podinfo
mountPath: /etc/podinfo
readOnly: true
- name: impersonation-proxy
mountPath: /var/run/secrets/impersonation-proxy.concierge.pinniped.dev/serviceaccount
readOnly: true
env:
#@ if data.values.https_proxy:
- name: HTTPS_PROXY
@@ -185,7 +196,6 @@ spec:
medium: Memory
sizeLimit: 100Mi
- name: config-volume
readOnly: true
configMap:
name: #@ defaultResourceNameWithSuffix("config")
- name: impersonation-proxy
@@ -195,7 +205,6 @@ spec:
- key: token
path: token
- name: podinfo
readOnly: true
downwardAPI:
items:
- path: "labels"
@@ -223,7 +232,7 @@ spec:
- weight: 50
podAffinityTerm:
labelSelector:
matchLabels: #@ defaultLabel()
matchLabels: #@ deploymentPodLabel()
topologyKey: kubernetes.io/hostname
---
apiVersion: v1
@@ -233,9 +242,12 @@ metadata:
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
labels: #@ labels()
#! prevent kapp from altering the selector of our services to match kubectl behavior
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector: #@ defaultLabel()
selector: #@ deploymentPodLabel()
ports:
- protocol: TCP
port: 443
@@ -247,9 +259,12 @@ metadata:
name: #@ defaultResourceNameWithSuffix("proxy")
namespace: #@ namespace()
labels: #@ labels()
#! prevent kapp from altering the selector of our services to match kubectl behavior
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector: #@ defaultLabel()
selector: #@ deploymentPodLabel()
ports:
- protocol: TCP
port: 443
@@ -25,9 +25,14 @@
#@ end
#@ def defaultLabel():
#! Note that the name of this label's key is also assumed by kubecertagent.go and impersonator_config.go
app: #@ data.values.app_name
#@ end
#@ def deploymentPodLabel():
deployment.pinniped.dev: concierge
#@ end
#@ def labels():
_: #@ template.replace(defaultLabel())
_: #@ template.replace(data.values.custom_labels)
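The manifest changes above add a second, more specific pod label (deploymentPodLabel) alongside the long-standing app label, and switch the Services to select on it. A minimal, hypothetical Go sketch of why that matters, using Kubernetes label machinery; the label values are assumptions based on the defaults in these templates:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A Concierge pod carries both the shared "app" label and the new dedicated label.
	conciergePod := labels.Set{"app": "pinniped-concierge", "deployment.pinniped.dev": "concierge"}
	// An older-style kube cert agent pod also carried the shared "app" label.
	oldStyleAgentPod := labels.Set{"app": "pinniped-concierge", "kube-cert-agent.pinniped.dev": "v2"}

	// Selecting on the shared "app" label would match both pods.
	appSelector := labels.SelectorFromSet(labels.Set{"app": "pinniped-concierge"})
	fmt.Println(appSelector.Matches(conciergePod), appSelector.Matches(oldStyleAgentPod)) // true true

	// Selecting on the dedicated deploymentPodLabel matches only the Concierge's own pods.
	podSelector := labels.SelectorFromSet(labels.Set{"deployment.pinniped.dev": "concierge"})
	fmt.Println(podSelector.Matches(conciergePod), podSelector.Matches(oldStyleAgentPod)) // true false
}
```

The Deployment's own selector cannot move to the new label because that field is immutable, which is why the diffs keep defaultLabel() there and only change the Services.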
@@ -145,7 +145,7 @@ rules:
#! We need to be able to create and update deployments in our namespace so we can manage the kube-cert-agent Deployment.
- apiGroups: [ apps ]
resources: [ deployments ]
verbs: [ create, get, list, patch, update, watch ]
verbs: [ create, get, list, patch, update, watch, delete ]
#! We need to be able to get replicasets so we can form the correct owner references on our generated objects.
- apiGroups: [ apps ]
resources: [ replicasets ]
@@ -73,6 +73,9 @@ metadata:
namespace: local-user-authenticator
labels:
app: local-user-authenticator
#! prevent kapp from altering the selector of our services to match kubectl behavior
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector:
@@ -3,7 +3,8 @@
#@ load("@ytt:data", "data")
#@ load("@ytt:json", "json")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel")
#@ load("@ytt:template", "template")
#@ if not data.values.into_namespace:
---
@@ -59,13 +60,18 @@ metadata:
spec:
replicas: #@ data.values.replicas
selector:
#! In hindsight, this should have been deploymentPodLabel(), but this field is immutable so changing it would break upgrades.
matchLabels: #@ defaultLabel()
template:
metadata:
labels: #@ defaultLabel()
labels:
#! This has always included defaultLabel(), which is used by this Deployment's selector.
_: #@ template.replace(defaultLabel())
#! More recently added the more unique deploymentPodLabel() so Services can select these Pods more specifically
#! without accidentally selecting pods from any future Deployments which might also want to use the defaultLabel().
_: #@ template.replace(deploymentPodLabel())
spec:
securityContext:
readOnlyRootFilesystem: true
runAsUser: #@ data.values.run_as_user
runAsGroup: #@ data.values.run_as_group
serviceAccountName: #@ defaultResourceName()
@@ -85,6 +91,8 @@ spec:
- pinniped-supervisor
- /etc/podinfo
- /etc/config/pinniped.yaml
securityContext:
readOnlyRootFilesystem: true
resources:
requests:
cpu: "100m"
@@ -95,8 +103,10 @@ spec:
volumeMounts:
- name: config-volume
mountPath: /etc/config
readOnly: true
- name: podinfo
mountPath: /etc/podinfo
readOnly: true
ports:
- containerPort: 8080
protocol: TCP
@@ -131,11 +141,9 @@ spec:
failureThreshold: 3
volumes:
- name: config-volume
readOnly: true
configMap:
name: #@ defaultResourceNameWithSuffix("static-config")
- name: podinfo
readOnly: true
downwardAPI:
items:
- path: "labels"
@@ -155,5 +163,5 @@ spec:
- weight: 50
podAffinityTerm:
labelSelector:
matchLabels: #@ defaultLabel()
matchLabels: #@ deploymentPodLabel()
topologyKey: kubernetes.io/hostname
@@ -28,6 +28,10 @@
app: #@ data.values.app_name
#@ end
#@ def deploymentPodLabel():
deployment.pinniped.dev: supervisor
#@ end
#@ def labels():
_: #@ template.replace(defaultLabel())
_: #@ template.replace(data.values.custom_labels)
@@ -1,8 +1,8 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:data", "data")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix")
#@ load("helpers.lib.yaml", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix")
#@ if data.values.service_http_nodeport_port or data.values.service_https_nodeport_port:
---
@@ -12,10 +12,12 @@ metadata:
name: #@ defaultResourceNameWithSuffix("nodeport")
namespace: #@ namespace()
labels: #@ labels()
#! prevent kapp from altering the selector of our services to match kubectl behavior
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: NodePort
selector:
app: #@ data.values.app_name
selector: #@ deploymentPodLabel()
ports:
#@ if data.values.service_http_nodeport_port:
- name: http
@@ -45,9 +47,12 @@ metadata:
name: #@ defaultResourceNameWithSuffix("clusterip")
namespace: #@ namespace()
labels: #@ labels()
#! prevent kapp from altering the selector of our services to match kubectl behavior
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector: #@ defaultLabel()
selector: #@ deploymentPodLabel()
ports:
#@ if data.values.service_http_clusterip_port:
- name: http
@@ -71,9 +76,12 @@ metadata:
name: #@ defaultResourceNameWithSuffix("loadbalancer")
namespace: #@ namespace()
labels: #@ labels()
#! prevent kapp from altering the selector of our services to match kubectl behavior
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: LoadBalancer
selector: #@ defaultLabel()
selector: #@ deploymentPodLabel()
#@ if data.values.service_loadbalancer_ip:
loadBalancerIP: #@ data.values.service_loadbalancer_ip
#@ end
go.mod
@@ -79,7 +79,6 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
@@ -24,3 +24,18 @@ nodes:
containerPort: 31235
hostPort: 12346
listenAddress: 127.0.0.1
kubeadmConfigPatches:
- |
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
extraArgs:
# To make sure the endpoints on our service are correct (this mostly matters for kubectl based
# installs where kapp is not doing magic changes to the deployment and service selectors).
# Setting this field to true makes it so that the API service will do the service cluster IP
# to endpoint IP translations internally instead of relying on the network stack (i.e. kube-proxy).
# The logic inside the API server is very straightforward - randomly pick an IP from the list
# of available endpoints. This means that over time, all endpoints associated with the service
# are exercised. For whatever reason, leaving this as false (i.e. use kube-proxy) appears to
# hide some network misconfigurations when used internally by the API server aggregation layer.
enable-aggregator-routing: "true"
@@ -219,8 +219,8 @@ ytt --file . \
--data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" >"$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
kapp deploy --yes --app local-user-authenticator --diff-changes --file "$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
popd >/dev/null
@@ -238,8 +238,8 @@ ytt --file . \
--data-value "pinny_bcrypt_passwd_hash=$(htpasswd -nbBC 10 x "$dex_test_password" | sed -e "s/^x://")" \
>"$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
kapp deploy --yes --app tools --diff-changes --file "$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
popd >/dev/null
@@ -281,6 +281,7 @@ ytt --file . \
>"$manifest"
kapp deploy --yes --app "$supervisor_app_name" --diff-changes --file "$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
popd >/dev/null
@@ -308,6 +309,7 @@ ytt --file . \
--data-value "discovery_url=$discovery_url" >"$manifest"
kapp deploy --yes --app "$concierge_app_name" --diff-changes --file "$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
popd >/dev/null
@@ -48,7 +48,11 @@ const (
// agentPodLabelKey is used to identify which pods are created by the kube-cert-agent
// controllers.
agentPodLabelKey = "kube-cert-agent.pinniped.dev"
agentPodLabelValue = "v2"
agentPodLabelValue = "v3"
// conciergeDefaultLabelKeyName is the name of the key of the label applied to all Concierge resources.
// This name is determined in the YAML manifests, but this controller needs to treat it as a special case below.
conciergeDefaultLabelKeyName = "app"
ClusterInfoNamespace = "kube-public"
clusterInfoName = "cluster-info"
@@ -84,10 +88,26 @@ type AgentConfig struct {
DiscoveryURLOverride *string
}
func (a *AgentConfig) agentLabels() map[string]string {
// Only select using the unique label which will not match the pods of any other Deployment.
// Older versions of Pinniped had multiple labels here.
func (a *AgentConfig) agentPodSelectorLabels() map[string]string {
return map[string]string{agentPodLabelKey: agentPodLabelValue}
}
// Label the agent pod using the configured labels plus the unique label which we will use in the selector.
func (a *AgentConfig) agentPodLabels() map[string]string {
allLabels := map[string]string{agentPodLabelKey: agentPodLabelValue}
for k, v := range a.Labels {
allLabels[k] = v
// Never label the agent pod with any label whose key is "app" because that could unfortunately match
// the selector of the main Concierge Deployment. This is sadly inconsistent because all other resources
// get labelled with the "app" label, but unfortunately the selector of the main Concierge Deployment is
// an immutable field, so we cannot update it to make it use a more specific label without breaking upgrades.
// Therefore, we take extra care here to avoid allowing the kube cert agent pods to match the selector of
// the main Concierge Deployment. Note that older versions of Pinniped included this "app" label, so during
// an upgrade we must take care to perform an update to remove it.
if k != conciergeDefaultLabelKeyName {
allLabels[k] = v
}
}
return allLabels
}
@@ -236,7 +256,7 @@ func (c *agentController) Sync(ctx controllerlib.Context) error {
return fmt.Errorf("could not get CredentialIssuer to update: %w", err)
}
// Find the latest healthy kube-controller-manager Pod in kube-system..
// Find the latest healthy kube-controller-manager Pod in kube-system.
controllerManagerPods, err := c.kubeSystemPods.Lister().Pods(ControllerManagerNamespace).List(controllerManagerLabels)
if err != nil {
err := fmt.Errorf("could not list controller manager pods: %w", err)
@@ -336,6 +356,7 @@ func (c *agentController) loadSigningKey(agentPod *corev1.Pod) error {
if err := c.dynamicCertProvider.SetCertKeyContent(certPEM, keyPEM); err != nil {
return fmt.Errorf("failed to set signing cert/key content from agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
}
c.log.Info("successfully loaded signing key from agent pod into cache")
// Remember that we've successfully loaded the key from this pod so we can skip the exec+load if nothing has changed.
c.execCache.Set(agentPod.UID, struct{}{}, 15*time.Minute)
@@ -365,16 +386,42 @@ func (c *agentController) createOrUpdateDeployment(ctx controllerlib.Context, ne
return err
}
// Otherwise update the spec of the Deployment to match our desired state.
// Update the spec of the Deployment to match our desired state.
updatedDeployment := existingDeployment.DeepCopy()
updatedDeployment.Spec = expectedDeployment.Spec
updatedDeployment.ObjectMeta = mergeLabelsAndAnnotations(updatedDeployment.ObjectMeta, expectedDeployment.ObjectMeta)
desireSelectorUpdate := !apiequality.Semantic.DeepEqual(updatedDeployment.Spec.Selector, existingDeployment.Spec.Selector)
desireTemplateLabelsUpdate := !apiequality.Semantic.DeepEqual(updatedDeployment.Spec.Template.Labels, existingDeployment.Spec.Template.Labels)
// If the existing Deployment already matches our desired spec, we're done.
if apiequality.Semantic.DeepDerivative(updatedDeployment, existingDeployment) {
return nil
// DeepDerivative allows the map fields of updatedDeployment to be a subset of existingDeployment,
// but we want to check that certain of those map fields are exactly equal before deciding to skip the update.
if !desireSelectorUpdate && !desireTemplateLabelsUpdate {
return nil // already equal enough, so skip update
}
}
// Selector is an immutable field, so if we want to update it then we must delete and recreate the Deployment,
// and then we're done. Older versions of Pinniped had multiple labels in the Selector, so to support upgrades from
// those versions we take extra care to handle this case.
if desireSelectorUpdate {
log.Info("deleting deployment to update immutable Selector field")
err = c.client.Kubernetes.AppsV1().Deployments(existingDeployment.Namespace).Delete(ctx.Context, existingDeployment.Name, metav1.DeleteOptions{
Preconditions: &metav1.Preconditions{
UID: &existingDeployment.UID,
ResourceVersion: &existingDeployment.ResourceVersion,
},
})
if err != nil {
return err
}
log.Info("creating new deployment to update immutable Selector field")
_, err = c.client.Kubernetes.AppsV1().Deployments(expectedDeployment.Namespace).Create(ctx.Context, expectedDeployment, metav1.CreateOptions{})
return err
}
// Otherwise, update the Deployment.
log.Info("updating existing deployment")
_, err = c.client.Kubernetes.AppsV1().Deployments(updatedDeployment.Namespace).Update(ctx.Context, updatedDeployment, metav1.UpdateOptions{})
return err
@@ -457,10 +504,10 @@ func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) *
},
Spec: appsv1.DeploymentSpec{
Replicas: pointer.Int32Ptr(1),
Selector: metav1.SetAsLabelSelector(c.cfg.agentLabels()),
Selector: metav1.SetAsLabelSelector(c.cfg.agentPodSelectorLabels()),
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: c.cfg.agentLabels(),
Labels: c.cfg.agentPodLabels(),
},
Spec: corev1.PodSpec{
TerminationGracePeriodSeconds: pointer.Int64Ptr(0),
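The createOrUpdateDeployment hunk above works around the fact that a Deployment's label selector is immutable. A condensed, hedged sketch of that upgrade path using plain client-go; the function name and simplified control flow here are illustrative, not the exact code in the repo:

```go
package main

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensureDeployment updates the Deployment in place when it can, but deletes and
// recreates it when the immutable selector needs to change (e.g. upgrading from
// an older Pinniped version whose selector contained extra labels).
func ensureDeployment(ctx context.Context, client kubernetes.Interface, existing, desired *appsv1.Deployment) error {
	if apiequality.Semantic.DeepEqual(existing.Spec.Selector, desired.Spec.Selector) {
		// Selector already matches, so an in-place update is allowed.
		desired.ResourceVersion = existing.ResourceVersion // required for optimistic concurrency
		_, err := client.AppsV1().Deployments(desired.Namespace).Update(ctx, desired, metav1.UpdateOptions{})
		return err
	}
	// Selector is immutable: delete the old Deployment, guarded by UID/ResourceVersion
	// preconditions so we never delete a newer object than the one we observed.
	err := client.AppsV1().Deployments(existing.Namespace).Delete(ctx, existing.Name, metav1.DeleteOptions{
		Preconditions: &metav1.Preconditions{
			UID:             &existing.UID,
			ResourceVersion: &existing.ResourceVersion,
		},
	})
	if err != nil {
		return err
	}
	// ...and recreate it with the new selector.
	_, err = client.AppsV1().Deployments(desired.Namespace).Create(ctx, desired, metav1.CreateOptions{})
	return err
}
```

This is also why the RBAC diff earlier adds the delete verb for deployments: without it the controller could never perform the delete half of this delete-and-recreate step.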
@@ -32,6 +32,7 @@ import (
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/here"
"go.pinniped.dev/internal/kubeclient"
"go.pinniped.dev/internal/testutil"
"go.pinniped.dev/internal/testutil/testlogger"
)
@@ -85,19 +86,18 @@ func TestAgentController(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: "concierge",
Name: "pinniped-concierge-kube-cert-agent",
Labels: map[string]string{"extralabel": "labelvalue"},
Labels: map[string]string{"extralabel": "labelvalue", "app": "anything"},
},
Spec: appsv1.DeploymentSpec{
Replicas: pointer.Int32Ptr(1),
Selector: metav1.SetAsLabelSelector(map[string]string{
"extralabel": "labelvalue",
"kube-cert-agent.pinniped.dev": "v2",
"kube-cert-agent.pinniped.dev": "v3",
}),
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"extralabel": "labelvalue",
"kube-cert-agent.pinniped.dev": "v2",
"kube-cert-agent.pinniped.dev": "v3",
},
},
Spec: corev1.PodSpec{
@@ -151,6 +151,19 @@ func TestAgentController(t *testing.T) {
},
}
// Older versions of Pinniped had a selector which included "app: app_name", e.g. "app: concierge".
// Selector is an immutable field, but we want to support upgrading from those older versions anyway.
oldStyleLabels := map[string]string{
"app": "concierge",
"extralabel": "labelvalue",
"kube-cert-agent.pinniped.dev": "v2",
}
healthyAgentDeploymentWithOldStyleSelector := healthyAgentDeployment.DeepCopy()
healthyAgentDeploymentWithOldStyleSelector.Spec.Selector = metav1.SetAsLabelSelector(oldStyleLabels)
healthyAgentDeploymentWithOldStyleSelector.Spec.Template.ObjectMeta.Labels = oldStyleLabels
healthyAgentDeploymentWithOldStyleSelector.UID = "fake-uid-abc123" // needs UID to test delete options
healthyAgentDeploymentWithOldStyleSelector.ResourceVersion = "fake-resource-version-1234" // needs ResourceVersion to test delete options
// The host network setting from the kube-controller-manager pod should be applied on the
// deployment.
healthyKubeControllerManagerPodWithHostNetwork := healthyKubeControllerManagerPod.DeepCopy()
@@ -186,7 +199,7 @@ func TestAgentController(t *testing.T) {
Namespace: "concierge",
Name: "pinniped-concierge-kube-cert-agent-xyz-1234",
UID: types.UID("pinniped-concierge-kube-cert-agent-xyz-1234-test-uid"),
Labels: map[string]string{"kube-cert-agent.pinniped.dev": "v2"},
Labels: map[string]string{"kube-cert-agent.pinniped.dev": "v3"},
CreationTimestamp: metav1.NewTime(now.Add(-2 * time.Hour)),
},
Spec: corev1.PodSpec{},
@@ -227,6 +240,8 @@ func TestAgentController(t *testing.T) {
alsoAllowUndesiredDistinctErrors []string
wantDistinctLogs []string
wantAgentDeployment *appsv1.Deployment
wantDeploymentActionVerbs []string
wantDeploymentDeleteActionOpts []metav1.DeleteOptions
wantStrategy *configv1alpha1.CredentialIssuerStrategy
}{
{
@@ -369,7 +384,8 @@ func TestAgentController(t *testing.T) {
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="creating new deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch", "create"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -417,7 +433,8 @@ func TestAgentController(t *testing.T) {
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="creating new deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
},
wantAgentDeployment: healthyAgentDeploymentWithDefaultedPaths,
wantAgentDeployment: healthyAgentDeploymentWithDefaultedPaths,
wantDeploymentActionVerbs: []string{"list", "watch", "create"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -426,6 +443,111 @@ func TestAgentController(t *testing.T) {
LastUpdateTime: metav1.NewTime(now),
},
},
{
name: "to support upgrade from old versions, update to immutable selector field of existing deployment causes delete and recreate, no running agent pods yet",
pinnipedObjects: []runtime.Object{
initialCredentialIssuer,
},
kubeObjects: []runtime.Object{
healthyKubeControllerManagerPod,
healthyAgentDeploymentWithOldStyleSelector,
pendingAgentPod,
},
wantDistinctErrors: []string{
"could not find a healthy agent pod (1 candidate)",
},
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="deleting deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
`kube-cert-agent-controller "level"=0 "msg"="creating new deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
},
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch", "delete", "create"}, // must recreate deployment when Selector field changes
wantDeploymentDeleteActionOpts: []metav1.DeleteOptions{
testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: "could not find a healthy agent pod (1 candidate)",
LastUpdateTime: metav1.NewTime(now),
},
},
{
name: "to support upgrade from old versions, update to immutable selector field of existing deployment causes delete and recreate, when delete fails",
pinnipedObjects: []runtime.Object{
initialCredentialIssuer,
},
kubeObjects: []runtime.Object{
healthyKubeControllerManagerPod,
healthyAgentDeploymentWithOldStyleSelector,
pendingAgentPod,
},
addKubeReactions: func(clientset *kubefake.Clientset) {
clientset.PrependReactor("delete", "deployments", func(action coretesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, fmt.Errorf("some delete error")
})
},
wantDistinctErrors: []string{
"could not ensure agent deployment: some delete error",
},
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="deleting deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
},
wantAgentDeployment: healthyAgentDeploymentWithOldStyleSelector, // couldn't be deleted, so it didn't change
// delete to try to recreate deployment when Selector field changes, but delete always fails, so keeps trying to delete
wantDeploymentActionVerbs: []string{"list", "watch", "delete", "delete", "delete", "delete"},
wantDeploymentDeleteActionOpts: []metav1.DeleteOptions{
testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: "could not ensure agent deployment: some delete error",
LastUpdateTime: metav1.NewTime(now),
},
},
{
name: "to support upgrade from old versions, update to immutable selector field of existing deployment causes delete and recreate, when delete succeeds but create fails",
pinnipedObjects: []runtime.Object{
initialCredentialIssuer,
},
kubeObjects: []runtime.Object{
healthyKubeControllerManagerPod,
healthyAgentDeploymentWithOldStyleSelector,
pendingAgentPod,
},
addKubeReactions: func(clientset *kubefake.Clientset) {
clientset.PrependReactor("create", "deployments", func(action coretesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, fmt.Errorf("some create error")
})
},
wantDistinctErrors: []string{
"could not ensure agent deployment: some create error",
},
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="deleting deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
`kube-cert-agent-controller "level"=0 "msg"="creating new deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
`kube-cert-agent-controller "level"=0 "msg"="creating new deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
},
wantAgentDeployment: nil, // was deleted, but couldn't be recreated
// delete to try to recreate deployment when Selector field changes, but create always fails, so keeps trying to recreate
wantDeploymentActionVerbs: []string{"list", "watch", "delete", "create", "create", "create", "create"},
wantDeploymentDeleteActionOpts: []metav1.DeleteOptions{
testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: "could not ensure agent deployment: some create error",
LastUpdateTime: metav1.NewTime(now),
},
},
{
name: "update to existing deployment, no running agent pods yet",
pinnipedObjects: []runtime.Object{
@@ -462,7 +584,8 @@ func TestAgentController(t *testing.T) {
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="updating existing deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
},
wantAgentDeployment: healthyAgentDeploymentWithExtraLabels,
wantAgentDeployment: healthyAgentDeploymentWithExtraLabels,
wantDeploymentActionVerbs: []string{"list", "watch", "update"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -484,7 +607,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
"failed to get kube-public/cluster-info configmap: configmap \"cluster-info\" not found",
},
wantAgentDeployment: healthyAgentDeploymentWithHostNetwork,
wantAgentDeployment: healthyAgentDeploymentWithHostNetwork,
wantDeploymentActionVerbs: []string{"list", "watch", "update"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -509,7 +633,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
"failed to get kube-public/cluster-info configmap: configmap \"cluster-info\" not found",
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -535,7 +660,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
"could not extract Kubernetes API endpoint info from kube-public/cluster-info configmap: missing \"kubeconfig\" key",
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -561,7 +687,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
"could not extract Kubernetes API endpoint info from kube-public/cluster-info configmap: key \"kubeconfig\" does not contain a valid kubeconfig",
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -587,7 +714,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
"could not extract Kubernetes API endpoint info from kube-public/cluster-info configmap: kubeconfig in key \"kubeconfig\" does not contain any clusters",
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -615,7 +743,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
"could not exec into agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: some exec error",
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -643,7 +772,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
`failed to decode signing cert/key JSON from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: invalid character 'b' looking for beginning of value`,
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -671,7 +801,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
`failed to decode signing cert base64 from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: illegal base64 data at input byte 4`,
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -699,7 +830,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
`failed to decode signing key base64 from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: illegal base64 data at input byte 4`,
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -730,7 +862,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
"failed to set signing cert/key content from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: some dynamic cert error",
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -754,8 +887,9 @@ func TestAgentController(t *testing.T) {
// If we pre-fill the cache here, we should never see any calls to the executor or dynamicCert mocks.
execCache.Set(healthyAgentPod.UID, struct{}{}, 1*time.Hour)
},
wantDistinctErrors: []string{""},
wantAgentDeployment: healthyAgentDeployment,
wantDistinctErrors: []string{""},
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.SuccessStrategyStatus,
@@ -782,9 +916,13 @@ func TestAgentController(t *testing.T) {
healthyAgentPod,
validClusterInfoConfigMap,
},
mocks: mockExecSucceeds,
wantDistinctErrors: []string{""},
wantAgentDeployment: healthyAgentDeployment,
mocks: mockExecSucceeds,
wantDistinctErrors: []string{""},
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="successfully loaded signing key from agent pod into cache"`,
},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.SuccessStrategyStatus,
@@ -811,10 +949,14 @@ func TestAgentController(t *testing.T) {
healthyAgentPod,
validClusterInfoConfigMap,
},
discoveryURLOverride: pointer.StringPtr("https://overridden-server.example.com/some/path"),
mocks: mockExecSucceeds,
wantDistinctErrors: []string{""},
wantAgentDeployment: healthyAgentDeployment,
discoveryURLOverride: pointer.StringPtr("https://overridden-server.example.com/some/path"),
mocks: mockExecSucceeds,
wantDistinctErrors: []string{""},
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="successfully loaded signing key from agent pod into cache"`,
},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.SuccessStrategyStatus,
@@ -843,6 +985,10 @@ func TestAgentController(t *testing.T) {
if tt.addKubeReactions != nil {
tt.addKubeReactions(kubeClientset)
}
actualDeleteActionOpts := &[]metav1.DeleteOptions{}
trackDeleteKubeClient := testutil.NewDeleteOptionsRecorder(kubeClientset, actualDeleteActionOpts)
kubeInformers := informers.NewSharedInformerFactory(kubeClientset, 0)
log := testlogger.New(t)
@@ -863,10 +1009,16 @@ func TestAgentController(t *testing.T) {
NamePrefix: "pinniped-concierge-kube-cert-agent-",
ContainerImagePullSecrets: []string{"pinniped-image-pull-secret"},
CredentialIssuerName: initialCredentialIssuer.Name,
Labels: map[string]string{"extralabel": "labelvalue"},
DiscoveryURLOverride: tt.discoveryURLOverride,
Labels: map[string]string{
"extralabel": "labelvalue",
// The special label "app" should never be added to the Pods of the kube cert agent Deployment.
// Older versions of Pinniped added this label, but it matches the Selector of the main
// Concierge Deployment, so we do not want it to exist on the Kube cert agent pods.
"app": "anything",
},
DiscoveryURLOverride: tt.discoveryURLOverride,
},
&kubeclient.Client{Kubernetes: kubeClientset, PinnipedConcierge: conciergeClientset},
&kubeclient.Client{Kubernetes: trackDeleteKubeClient, PinnipedConcierge: conciergeClientset},
kubeInformers.Core().V1().Pods(),
kubeInformers.Apps().V1().Deployments(),
kubeInformers.Core().V1().Pods(),
@@ -894,6 +1046,20 @@ func TestAgentController(t *testing.T) {
assert.Equal(t, tt.wantDistinctLogs, deduplicate(log.Lines()), "unexpected logs")
// Assert on all actions that happened to deployments.
var actualDeploymentActionVerbs []string
for _, a := range kubeClientset.Actions() {
if a.GetResource().Resource == "deployments" {
actualDeploymentActionVerbs = append(actualDeploymentActionVerbs, a.GetVerb())
}
}
if tt.wantDeploymentActionVerbs != nil {
require.Equal(t, tt.wantDeploymentActionVerbs, actualDeploymentActionVerbs)
}
if tt.wantDeploymentDeleteActionOpts != nil {
require.Equal(t, tt.wantDeploymentDeleteActionOpts, *actualDeleteActionOpts)
}
// Assert that the agent deployment is in the expected final state.
deployments, err := kubeClientset.AppsV1().Deployments("concierge").List(ctx, metav1.ListOptions{})
require.NoError(t, err)
@@ -207,7 +207,7 @@ type UpstreamActiveDirectoryIdentityProviderICache interface {
type activeDirectoryWatcherController struct {
cache UpstreamActiveDirectoryIdentityProviderICache
validatedSecretVersionsCache *upstreamwatchers.SecretVersionCache
validatedSecretVersionsCache upstreamwatchers.SecretVersionCacheI
ldapDialer upstreamldap.LDAPDialer
client pinnipedclientset.Interface
activeDirectoryIdentityProviderInformer idpinformers.ActiveDirectoryIdentityProviderInformer
@@ -238,7 +238,7 @@ func New(
// For test dependency injection purposes.
func newInternal(
idpCache UpstreamActiveDirectoryIdentityProviderICache,
validatedSecretVersionsCache *upstreamwatchers.SecretVersionCache,
validatedSecretVersionsCache upstreamwatchers.SecretVersionCacheI,
ldapDialer upstreamldap.LDAPDialer,
client pinnipedclientset.Interface,
activeDirectoryIdentityProviderInformer idpinformers.ActiveDirectoryIdentityProviderInformer,
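The switch from the concrete *upstreamwatchers.SecretVersionCache pointer to the upstreamwatchers.SecretVersionCacheI interface is a standard interface-extraction step for test injection. A hypothetical, self-contained Go sketch of the pattern; the interface name, method, and types below are invented for illustration only and are not the actual API of the upstreamwatchers package:

```go
package main

import "fmt"

// versionCache stands in for the kind of behavior such an interface abstracts;
// its single method is invented for this sketch.
type versionCache interface {
	Get(providerName string) (resourceVersion string, found bool)
}

// controller depends only on the interface, so production and test code can
// supply different implementations.
type controller struct {
	validated versionCache
}

func (c *controller) needsRevalidation(name, currentVersion string) bool {
	v, ok := c.validated.Get(name)
	return !ok || v != currentVersion
}

// fakeCache is what a unit test might inject in place of the real implementation.
type fakeCache map[string]string

func (f fakeCache) Get(name string) (string, bool) {
	v, ok := f[name]
	return v, ok
}

func main() {
	c := &controller{validated: fakeCache{"my-idp": "4242"}}
	fmt.Println(c.needsRevalidation("my-idp", "4242")) // false: already validated at this version
	fmt.Println(c.needsRevalidation("my-idp", "4343")) // true: secret changed, revalidate
}
```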
@@ -370,7 +370,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
Conditions: allConditionsTrue(1234, "4242"),
},
}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase, Generation: 1234}},
},
{
name: "missing secret",
@@ -555,7 +555,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
},
},
}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase, Generation: 1234}},
},
{
name: "sAMAccountName explicitly provided as group name attribute does not add an override",
@@ -610,7 +610,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
},
},
}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase, Generation: 1234}},
},
{
name: "when TLS connection fails it tries to use StartTLS instead: without a specified port it automatically switches ports",
@@ -670,7 +670,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
},
},
}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.StartTLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.StartTLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase, Generation: 1234}},
},
{
name: "when TLS connection fails it tries to use StartTLS instead: with a specified port it does not automatically switch ports",
@@ -729,7 +729,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
},
},
}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{},
},
{
name: "non-nil TLS configuration with empty CertificateAuthorityData is valid",
@@ -771,7 +771,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
Conditions: allConditionsTrue(1234, "4242"),
},
}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase, Generation: 1234}},
},
{
name: "one valid upstream and one invalid upstream updates the cache to include only the valid upstream",
@@ -814,10 +814,12 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
},
},
},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase, Generation: 1234}},
},
{
name: "when testing the connection to the LDAP server fails then the upstream is still added to the cache anyway (treated like a warning)",
name: "when testing the connection to the LDAP server fails then the upstream is still added to the cache anyway but not to validatedsettings (treated like a warning)",
// If we can't connect, we can still try to allow users to log in, but update the conditions to say that there's a problem
// Also don't add anything to the validated settings so that the next time this runs we can try again.
inputUpstreams: []runtime.Object{validUpstream},
inputSecrets: []runtime.Object{validBindUserSecret("")},
setupMocks: func(conn *mockldapconn.MockConn) {
@@ -849,10 +851,11 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
},
},
}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{},
},
{
name: "when testing the connection to the LDAP server fails, but later querying defaultsearchbase succeeds, then the upstream is still added to the cache anyway (treated like a warning)",
// Add to cache but not to validatedSettings so we recheck next time
inputUpstreams: []runtime.Object{editedValidUpstream(func(upstream *v1alpha1.ActiveDirectoryIdentityProvider) {
upstream.Spec.UserSearch.Base = ""
})},
@@ -909,7 +912,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
},
},
}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {UserSearchBase: exampleDefaultNamingContext, GroupSearchBase: testGroupSearchBase}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{},
},
{
name: "when testing the connection to the LDAP server fails, and querying defaultsearchbase fails, then the upstream is not added to the cache",
@@ -945,7 +948,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
},
},
}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {GroupSearchBase: testGroupSearchBase}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{},
},
{
name: "when the LDAP server connection was already validated using TLS for the current resource generation and secret version, then do not validate it again and keep using TLS",
@@ -953,10 +956,11 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
upstream.Generation = 1234
upstream.Status.Conditions = []v1alpha1.Condition{
activeDirectoryConnectionValidTrueCondition(1234, "4242"),
searchBaseFoundInConfigCondition(1234),
}
})},
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase}},
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase, Generation: 1234}},
setupMocks: func(conn *mockldapconn.MockConn) {
// Should not perform a test dial and bind. No mocking here means the test will fail if Bind() or Close() are called.
},
@@ -968,10 +972,12 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
Conditions: allConditionsTrue(1234, "4242"),
},
}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase}},
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase, Generation: 1234}},
},
{
name: "when the LDAP server connection was already validated using TLS, but the search base wasn't, load TLS into the config and try again for the search base",
name: "when the validated cache contains LDAP server info but the search base is empty, reload everything",
|
||||
// this is an invalid state that shouldn't happen now, but if it does we should consider the whole
|
||||
// validatedsettings cache invalid.
|
||||
inputUpstreams: []runtime.Object{editedValidUpstream(func(upstream *v1alpha1.ActiveDirectoryIdentityProvider) {
|
||||
upstream.Generation = 1234
|
||||
upstream.Status.Conditions = []v1alpha1.Condition{
|
||||
@@ -980,10 +986,10 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
upstream.Spec.UserSearch.Base = ""
|
||||
})},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS}},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, Generation: 1234}},
|
||||
setupMocks: func(conn *mockldapconn.MockConn) {
|
||||
conn.EXPECT().Bind(testBindUsername, testBindPassword).Times(1)
|
||||
conn.EXPECT().Close().Times(1)
|
||||
conn.EXPECT().Bind(testBindUsername, testBindPassword).Times(2)
|
||||
conn.EXPECT().Close().Times(2)
|
||||
conn.EXPECT().Search(expectedDefaultNamingContextSearch()).Return(exampleDefaultNamingContextSearchResult, nil).Times(1)
|
||||
},
|
||||
wantResultingCache: []*upstreamldap.ProviderConfig{
|
||||
@@ -1020,7 +1026,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: exampleDefaultNamingContext, GroupSearchBase: testGroupSearchBase}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: exampleDefaultNamingContext, GroupSearchBase: testGroupSearchBase, Generation: 1234}},
|
||||
},
|
||||
{
|
||||
name: "when the LDAP server connection was already validated using TLS, and the search base was found, load TLS and search base info into the cache",
|
||||
@@ -1033,7 +1039,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
upstream.Spec.UserSearch.Base = ""
|
||||
})},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: exampleDefaultNamingContext, GroupSearchBase: testGroupSearchBase}},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: exampleDefaultNamingContext, GroupSearchBase: testGroupSearchBase, Generation: 1234}},
|
||||
setupMocks: func(conn *mockldapconn.MockConn) {
|
||||
},
|
||||
wantResultingCache: []*upstreamldap.ProviderConfig{
|
||||
@@ -1075,6 +1081,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: exampleDefaultNamingContext,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}},
|
||||
},
|
||||
{
|
||||
@@ -1083,10 +1090,11 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
upstream.Generation = 1234
|
||||
upstream.Status.Conditions = []v1alpha1.Condition{
|
||||
activeDirectoryConnectionValidTrueCondition(1234, "4242"),
|
||||
searchBaseFoundInConfigCondition(1234),
|
||||
}
|
||||
})},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.StartTLS}},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.StartTLS, Generation: 1234, UserSearchBase: testUserSearchBase, GroupSearchBase: testGroupSearchBase}},
|
||||
setupMocks: func(conn *mockldapconn.MockConn) {
|
||||
// Should not perform a test dial and bind. No mocking here means the test will fail if Bind() or Close() are called.
|
||||
},
|
||||
@@ -1103,6 +1111,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.StartTLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}},
|
||||
},
|
||||
{
|
||||
@@ -1119,6 +1128,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1233,
|
||||
}},
|
||||
setupMocks: func(conn *mockldapconn.MockConn) {
|
||||
// Should perform a test dial and bind.
|
||||
@@ -1138,6 +1148,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}},
|
||||
},
|
||||
{
|
||||
@@ -1156,7 +1167,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
}
|
||||
})},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "1", LDAPConnectionProtocol: upstreamldap.TLS}},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "1", LDAPConnectionProtocol: upstreamldap.TLS, Generation: 1234}},
|
||||
setupMocks: func(conn *mockldapconn.MockConn) {
|
||||
// Should perform a test dial and bind.
|
||||
conn.EXPECT().Bind(testBindUsername, testBindPassword).Times(1)
|
||||
@@ -1175,6 +1186,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}},
|
||||
},
|
||||
{
|
||||
@@ -1191,6 +1203,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}}, // old version was validated
|
||||
setupMocks: func(conn *mockldapconn.MockConn) {
|
||||
// Should perform a test dial and bind.
|
||||
@@ -1210,6 +1223,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}},
|
||||
},
|
||||
{
|
||||
@@ -1261,6 +1275,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}},
|
||||
},
|
||||
{
|
||||
@@ -1311,7 +1326,13 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: exampleDefaultNamingContext, GroupSearchBase: exampleDefaultNamingContext}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {
|
||||
BindSecretResourceVersion: "4242",
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: exampleDefaultNamingContext,
|
||||
GroupSearchBase: exampleDefaultNamingContext,
|
||||
Generation: 1234,
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "when the input activedirectoryidentityprovider leaves user search base blank but provides group search base, query for defaultNamingContext",
|
||||
@@ -1360,7 +1381,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: exampleDefaultNamingContext, GroupSearchBase: testGroupSearchBase}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: exampleDefaultNamingContext, GroupSearchBase: testGroupSearchBase, Generation: 1234}},
|
||||
},
|
||||
{
|
||||
name: "when the input activedirectoryidentityprovider leaves group search base blank but provides user search base, query for defaultNamingContext",
|
||||
@@ -1409,7 +1430,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: exampleDefaultNamingContext}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS, UserSearchBase: testUserSearchBase, GroupSearchBase: exampleDefaultNamingContext, Generation: 1234}},
|
||||
},
|
||||
{
|
||||
name: "when the input activedirectoryidentityprovider leaves group search base blank and query for defaultNamingContext fails",
|
||||
@@ -1437,10 +1458,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{
|
||||
testName: {BindSecretResourceVersion: "4242",
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{},
|
||||
},
|
||||
{
|
||||
name: "when query for defaultNamingContext returns empty string",
|
||||
@@ -1476,10 +1494,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{
|
||||
testName: {BindSecretResourceVersion: "4242",
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{},
|
||||
},
|
||||
{
|
||||
name: "when query for defaultNamingContext returns multiple entries",
|
||||
@@ -1521,10 +1536,7 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{
|
||||
testName: {BindSecretResourceVersion: "4242",
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{},
|
||||
},
|
||||
{
|
||||
name: "when query for defaultNamingContext returns no entries",
|
||||
@@ -1553,10 +1565,73 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{},
|
||||
},
|
||||
{
|
||||
name: "when search base was previously found but the bind secret has changed",
|
||||
inputUpstreams: []runtime.Object{editedValidUpstream(func(upstream *v1alpha1.ActiveDirectoryIdentityProvider) {
|
||||
upstream.Generation = 1234
|
||||
upstream.Status.Conditions = []v1alpha1.Condition{
|
||||
searchBaseFoundInRootDSECondition(1234),
|
||||
}
|
||||
upstream.Spec.UserSearch.Attributes = v1alpha1.ActiveDirectoryIdentityProviderUserSearchAttributes{}
|
||||
upstream.Spec.GroupSearch.Base = ""
|
||||
})},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {
|
||||
BindSecretResourceVersion: "4241",
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}},
|
||||
setupMocks: func(conn *mockldapconn.MockConn) {
|
||||
// Should perform a test dial and bind.
|
||||
conn.EXPECT().Bind(testBindUsername, testBindPassword).Times(2)
|
||||
conn.EXPECT().Close().Times(2)
|
||||
conn.EXPECT().Search(expectedDefaultNamingContextSearch()).Return(exampleDefaultNamingContextSearchResult, nil).Times(1)
|
||||
},
|
||||
wantResultingCache: []*upstreamldap.ProviderConfig{
|
||||
{
|
||||
Name: testName,
|
||||
Host: testHost,
|
||||
ConnectionProtocol: upstreamldap.TLS,
|
||||
CABundle: testCABundle,
|
||||
BindUsername: testBindUsername,
|
||||
BindPassword: testBindPassword,
|
||||
UserSearch: upstreamldap.UserSearchConfig{
|
||||
Base: testUserSearchBase,
|
||||
Filter: testUserSearchFilter,
|
||||
UsernameAttribute: "userPrincipalName",
|
||||
UIDAttribute: "objectGUID",
|
||||
},
|
||||
GroupSearch: upstreamldap.GroupSearchConfig{
|
||||
Base: exampleDefaultNamingContext,
|
||||
Filter: testGroupSearchFilter,
|
||||
GroupNameAttribute: testGroupNameAttrName,
|
||||
},
|
||||
UIDAttributeParsingOverrides: map[string]func(*ldap.Entry) (string, error){"objectGUID": upstreamldap.MicrosoftUUIDFromBinary("objectGUID")},
|
||||
},
|
||||
},
|
||||
wantResultingUpstreams: []v1alpha1.ActiveDirectoryIdentityProvider{{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName, Generation: 1234},
|
||||
Status: v1alpha1.ActiveDirectoryIdentityProviderStatus{
|
||||
Phase: "Ready",
|
||||
Conditions: []v1alpha1.Condition{
|
||||
bindSecretValidTrueCondition(1234),
|
||||
activeDirectoryConnectionValidTrueCondition(1234, "4242"),
|
||||
searchBaseFoundInRootDSECondition(1234),
|
||||
tlsConfigurationValidLoadedTrueCondition(1234),
|
||||
},
|
||||
},
|
||||
}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{
|
||||
testName: {BindSecretResourceVersion: "4242",
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase}},
|
||||
GroupSearchBase: exampleDefaultNamingContext,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
Generation: 1234,
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1592,9 +1667,15 @@ func TestActiveDirectoryUpstreamWatcherControllerSync(t *testing.T) {
|
||||
return conn, nil
|
||||
})}
|
||||
|
||||
validatedSecretVersionCache := upstreamwatchers.NewSecretVersionCache()
|
||||
var validatedSecretVersionCache *upstreamwatchers.SecretVersionCache
|
||||
if tt.initialValidatedSettings != nil {
|
||||
validatedSecretVersionCache.ValidatedSettingsByName = tt.initialValidatedSettings
|
||||
validatedSecretVersionCache = &upstreamwatchers.SecretVersionCache{
|
||||
ValidatedSettingsByName: tt.initialValidatedSettings,
|
||||
}
|
||||
} else {
|
||||
validatedSecretVersionCache = &upstreamwatchers.SecretVersionCache{
|
||||
ValidatedSettingsByName: map[string]upstreamwatchers.ValidatedSettings{},
|
||||
}
|
||||
}
|
||||
|
||||
controller := newInternal(
|
||||
|
||||
@@ -134,7 +134,7 @@ type UpstreamLDAPIdentityProviderICache interface {
|
||||
|
||||
type ldapWatcherController struct {
|
||||
cache UpstreamLDAPIdentityProviderICache
|
||||
validatedSecretVersionsCache *upstreamwatchers.SecretVersionCache
|
||||
validatedSecretVersionsCache upstreamwatchers.SecretVersionCacheI
|
||||
ldapDialer upstreamldap.LDAPDialer
|
||||
client pinnipedclientset.Interface
|
||||
ldapIdentityProviderInformer idpinformers.LDAPIdentityProviderInformer
|
||||
@@ -165,7 +165,7 @@ func New(
|
||||
// For test dependency injection purposes.
|
||||
func newInternal(
|
||||
idpCache UpstreamLDAPIdentityProviderICache,
|
||||
validatedSecretVersionsCache *upstreamwatchers.SecretVersionCache,
|
||||
validatedSecretVersionsCache upstreamwatchers.SecretVersionCacheI,
|
||||
ldapDialer upstreamldap.LDAPDialer,
|
||||
client pinnipedclientset.Interface,
|
||||
ldapIdentityProviderInformer idpinformers.LDAPIdentityProviderInformer,
|
||||
|
||||
@@ -310,6 +310,7 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}},
|
||||
},
|
||||
{
|
||||
@@ -498,6 +499,7 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}}},
|
||||
{
|
||||
name: "when TLS connection fails it tries to use StartTLS instead: without a specified port it automatically switches ports",
|
||||
@@ -560,6 +562,7 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.StartTLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}}},
|
||||
{
|
||||
name: "when TLS connection fails it tries to use StartTLS instead: with a specified port it does not automatically switch ports",
|
||||
@@ -616,10 +619,7 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{},
|
||||
},
|
||||
{
|
||||
name: "non-nil TLS configuration with empty CertificateAuthorityData is valid",
|
||||
@@ -665,6 +665,7 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}},
|
||||
},
|
||||
{
|
||||
@@ -713,9 +714,10 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}}},
|
||||
{
|
||||
name: "when testing the connection to the LDAP server fails then the upstream is still added to the cache anyway (treated like a warning)",
|
||||
name: "when testing the connection to the LDAP server fails then the upstream is still added to the cache anyway (treated like a warning) but not the validated settings cache",
|
||||
inputUpstreams: []runtime.Object{validUpstream},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("")},
|
||||
setupMocks: func(conn *mockldapconn.MockConn) {
|
||||
@@ -746,10 +748,7 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{},
|
||||
},
|
||||
{
|
||||
name: "when the LDAP server connection was already validated using TLS for the current resource generation and secret version, then do not validate it again and keep using TLS",
|
||||
@@ -759,8 +758,14 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
ldapConnectionValidTrueCondition(1234, "4242"),
|
||||
}
|
||||
})},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS}},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{
|
||||
testName: {BindSecretResourceVersion: "4242",
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}},
|
||||
setupMocks: func(conn *mockldapconn.MockConn) {
|
||||
// Should not perform a test dial and bind. No mocking here means the test will fail if Bind() or Close() are called.
|
||||
},
|
||||
@@ -777,6 +782,7 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}}},
|
||||
{
|
||||
name: "when the LDAP server connection was already validated using StartTLS for the current resource generation and secret version, then do not validate it again and keep using StartTLS",
|
||||
@@ -786,8 +792,14 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
ldapConnectionValidTrueCondition(1234, "4242"),
|
||||
}
|
||||
})},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.StartTLS}},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {
|
||||
BindSecretResourceVersion: "4242",
|
||||
LDAPConnectionProtocol: upstreamldap.StartTLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}},
|
||||
setupMocks: func(conn *mockldapconn.MockConn) {
|
||||
// Should not perform a test dial and bind. No mocking here means the test will fail if Bind() or Close() are called.
|
||||
},
|
||||
@@ -804,6 +816,7 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.StartTLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}}},
|
||||
{
|
||||
name: "when the LDAP server connection was validated for an older resource generation, then try to validate it again",
|
||||
@@ -813,8 +826,14 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
ldapConnectionValidTrueCondition(1233, "4242"), // older spec generation!
|
||||
}
|
||||
})},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4242", LDAPConnectionProtocol: upstreamldap.TLS}},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {
|
||||
BindSecretResourceVersion: "4242",
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
Generation: 1233,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
}},
|
||||
setupMocks: func(conn *mockldapconn.MockConn) {
|
||||
// Should perform a test dial and bind.
|
||||
conn.EXPECT().Bind(testBindUsername, testBindPassword).Times(1)
|
||||
@@ -833,6 +852,7 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}}},
|
||||
{
|
||||
name: "when the LDAP server connection validation previously failed for this resource generation, then try to validate it again",
|
||||
@@ -849,8 +869,7 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
},
|
||||
}
|
||||
})},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "1", LDAPConnectionProtocol: upstreamldap.TLS}},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
|
||||
setupMocks: func(conn *mockldapconn.MockConn) {
|
||||
// Should perform a test dial and bind.
|
||||
conn.EXPECT().Bind(testBindUsername, testBindPassword).Times(1)
|
||||
@@ -869,7 +888,49 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
}}},
|
||||
Generation: 1234,
|
||||
}}}, {
|
||||
name: "when the validated settings cache is incomplete, then try to validate it again",
|
||||
// this shouldn't happen, but if it does, just throw it out and try again.
|
||||
inputUpstreams: []runtime.Object{editedValidUpstream(func(upstream *v1alpha1.LDAPIdentityProvider) {
|
||||
upstream.Generation = 1234
|
||||
upstream.Status.Conditions = []v1alpha1.Condition{
|
||||
{
|
||||
Type: "LDAPConnectionValid",
|
||||
Status: "False", // failure!
|
||||
LastTransitionTime: now,
|
||||
Reason: "LDAPConnectionError",
|
||||
Message: "some-error-message",
|
||||
ObservedGeneration: 1234, // same (current) generation!
|
||||
},
|
||||
}
|
||||
})},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")},
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {
|
||||
BindSecretResourceVersion: "4242",
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
}},
|
||||
setupMocks: func(conn *mockldapconn.MockConn) {
|
||||
// Should perform a test dial and bind.
|
||||
conn.EXPECT().Bind(testBindUsername, testBindPassword).Times(1)
|
||||
conn.EXPECT().Close().Times(1)
|
||||
},
|
||||
wantResultingCache: []*upstreamldap.ProviderConfig{providerConfigForValidUpstreamWithTLS},
|
||||
wantResultingUpstreams: []v1alpha1.LDAPIdentityProvider{{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName, Generation: 1234},
|
||||
Status: v1alpha1.LDAPIdentityProviderStatus{
|
||||
Phase: "Ready",
|
||||
Conditions: allConditionsTrue(1234, "4242"),
|
||||
},
|
||||
}},
|
||||
wantValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {
|
||||
BindSecretResourceVersion: "4242",
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "when the LDAP server connection was already validated for this resource generation but the bind secret has changed, then try to validate it again",
|
||||
inputUpstreams: []runtime.Object{editedValidUpstream(func(upstream *v1alpha1.LDAPIdentityProvider) {
|
||||
@@ -878,8 +939,14 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
ldapConnectionValidTrueCondition(1234, "4241"), // same spec generation, old secret version
|
||||
}
|
||||
})},
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")}, // newer secret version!
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {BindSecretResourceVersion: "4241", LDAPConnectionProtocol: upstreamldap.TLS}}, // old version was validated
|
||||
inputSecrets: []runtime.Object{validBindUserSecret("4242")}, // newer secret version!
|
||||
initialValidatedSettings: map[string]upstreamwatchers.ValidatedSettings{testName: {
|
||||
BindSecretResourceVersion: "4241",
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}}, // old version was validated
|
||||
setupMocks: func(conn *mockldapconn.MockConn) {
|
||||
// Should perform a test dial and bind.
|
||||
conn.EXPECT().Bind(testBindUsername, testBindPassword).Times(1)
|
||||
@@ -898,6 +965,7 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
LDAPConnectionProtocol: upstreamldap.TLS,
|
||||
UserSearchBase: testUserSearchBase,
|
||||
GroupSearchBase: testGroupSearchBase,
|
||||
Generation: 1234,
|
||||
}}},
|
||||
}
|
||||
|
||||
@@ -933,9 +1001,15 @@ func TestLDAPUpstreamWatcherControllerSync(t *testing.T) {
|
||||
return conn, nil
|
||||
})}
|
||||
|
||||
validatedSecretVersionCache := upstreamwatchers.NewSecretVersionCache()
|
||||
var validatedSecretVersionCache *upstreamwatchers.SecretVersionCache
|
||||
if tt.initialValidatedSettings != nil {
|
||||
validatedSecretVersionCache.ValidatedSettingsByName = tt.initialValidatedSettings
|
||||
validatedSecretVersionCache = &upstreamwatchers.SecretVersionCache{
|
||||
ValidatedSettingsByName: tt.initialValidatedSettings,
|
||||
}
|
||||
} else {
|
||||
validatedSecretVersionCache = &upstreamwatchers.SecretVersionCache{
|
||||
ValidatedSettingsByName: map[string]upstreamwatchers.ValidatedSettings{},
|
||||
}
|
||||
}
|
||||
|
||||
controller := newInternal(
|
||||
|
||||
@@ -46,19 +46,40 @@ const (
|
||||
|
||||
// An in-memory cache with an entry for each ActiveDirectoryIdentityProvider, to keep track of which ResourceVersion
|
||||
// of the bind Secret, which TLS/StartTLS setting was used and which search base was found during the most recent successful validation.
|
||||
type SecretVersionCacheI interface {
|
||||
Get(upstreamName, resourceVersion string, generation int64) (ValidatedSettings, bool)
|
||||
Set(upstreamName, resourceVersion string, generation int64, settings ValidatedSettings)
|
||||
}
|
||||
|
||||
type SecretVersionCache struct {
|
||||
ValidatedSettingsByName map[string]ValidatedSettings
|
||||
}
|
||||
|
||||
func (s *SecretVersionCache) Get(upstreamName, resourceVersion string, generation int64) (ValidatedSettings, bool) {
|
||||
validatedSettings := s.ValidatedSettingsByName[upstreamName]
|
||||
if validatedSettings.BindSecretResourceVersion == resourceVersion &&
|
||||
validatedSettings.Generation == generation && validatedSettings.UserSearchBase != "" &&
|
||||
validatedSettings.GroupSearchBase != "" && validatedSettings.LDAPConnectionProtocol != "" {
|
||||
return validatedSettings, true
|
||||
}
|
||||
return ValidatedSettings{}, false
|
||||
}
|
||||
|
||||
func (s *SecretVersionCache) Set(upstreamName, resourceVersion string, generation int64, settings ValidatedSettings) {
|
||||
s.ValidatedSettingsByName[upstreamName] = settings
|
||||
}
|
||||
|
||||
type ValidatedSettings struct {
|
||||
Generation int64
|
||||
BindSecretResourceVersion string
|
||||
LDAPConnectionProtocol upstreamldap.LDAPConnectionProtocol
|
||||
UserSearchBase string
|
||||
GroupSearchBase string
|
||||
}
|
||||
|
||||
func NewSecretVersionCache() *SecretVersionCache {
|
||||
return &SecretVersionCache{ValidatedSettingsByName: map[string]ValidatedSettings{}}
|
||||
func NewSecretVersionCache() SecretVersionCacheI {
|
||||
cache := SecretVersionCache{ValidatedSettingsByName: map[string]ValidatedSettings{}}
|
||||
return &cache
|
||||
}
|
||||
|
||||
// read only interface for sharing between ldap and active directory.
|
||||
@@ -167,37 +188,6 @@ func TestConnection(
|
||||
}
|
||||
}
|
||||
|
||||
func HasPreviousSuccessfulTLSConnectionConditionForCurrentSpecGenerationAndSecretVersion(secretVersionCache *SecretVersionCache, currentGeneration int64, upstreamStatusConditions []v1alpha1.Condition, upstreamName string, currentSecretVersion string, config *upstreamldap.ProviderConfig) bool {
|
||||
for _, cond := range upstreamStatusConditions {
|
||||
if cond.Type == typeLDAPConnectionValid && cond.Status == v1alpha1.ConditionTrue && cond.ObservedGeneration == currentGeneration {
|
||||
// Found a previously successful condition for the current spec generation.
|
||||
// Now figure out which version of the bind Secret was used during that previous validation, if any.
|
||||
validatedSecretVersion := secretVersionCache.ValidatedSettingsByName[upstreamName]
|
||||
if validatedSecretVersion.BindSecretResourceVersion == currentSecretVersion {
|
||||
// Reload the TLS vs StartTLS setting that was previously validated.
|
||||
config.ConnectionProtocol = validatedSecretVersion.LDAPConnectionProtocol
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func HasPreviousSuccessfulSearchBaseConditionForCurrentGeneration(secretVersionCache *SecretVersionCache, currentGeneration int64, upstreamStatusConditions []v1alpha1.Condition, upstreamName string, currentSecretVersion string, config *upstreamldap.ProviderConfig) bool {
|
||||
for _, cond := range upstreamStatusConditions {
|
||||
if cond.Type == TypeSearchBaseFound && cond.Status == v1alpha1.ConditionTrue && cond.ObservedGeneration == currentGeneration {
|
||||
// Found a previously successful condition for the current spec generation.
|
||||
// Now figure out which version of the bind Secret was used during that previous validation, if any.
|
||||
validatedSettings := secretVersionCache.ValidatedSettingsByName[upstreamName]
|
||||
// Reload the user search and group search base settings that were previously validated.
|
||||
config.UserSearch.Base = validatedSettings.UserSearchBase
|
||||
config.GroupSearch.Base = validatedSettings.GroupSearchBase
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func validTLSCondition(message string) *v1alpha1.Condition {
|
||||
return &v1alpha1.Condition{
|
||||
Type: typeTLSConfigurationValid,
|
||||
@@ -279,7 +269,7 @@ type GradatedCondition struct {
|
||||
isFatal bool
|
||||
}
|
||||
|
||||
func ValidateGenericLDAP(ctx context.Context, upstream UpstreamGenericLDAPIDP, secretInformer corev1informers.SecretInformer, validatedSecretVersionsCache *SecretVersionCache, config *upstreamldap.ProviderConfig) GradatedConditions {
|
||||
func ValidateGenericLDAP(ctx context.Context, upstream UpstreamGenericLDAPIDP, secretInformer corev1informers.SecretInformer, validatedSecretVersionsCache SecretVersionCacheI, config *upstreamldap.ProviderConfig) GradatedConditions {
|
||||
conditions := GradatedConditions{}
|
||||
secretValidCondition, currentSecretVersion := ValidateSecret(secretInformer, upstream.Spec().BindSecretName(), upstream.Namespace(), config)
|
||||
conditions.Append(secretValidCondition, true)
|
||||
@@ -301,35 +291,44 @@ func ValidateGenericLDAP(ctx context.Context, upstream UpstreamGenericLDAPIDP, s
|
||||
return conditions
|
||||
}
|
||||
|
||||
func validateAndSetLDAPServerConnectivityAndSearchBase(ctx context.Context, validatedSecretVersionsCache *SecretVersionCache, upstream UpstreamGenericLDAPIDP, config *upstreamldap.ProviderConfig, currentSecretVersion string) (*v1alpha1.Condition, *v1alpha1.Condition) {
|
||||
var ldapConnectionValidCondition *v1alpha1.Condition
|
||||
if !HasPreviousSuccessfulTLSConnectionConditionForCurrentSpecGenerationAndSecretVersion(validatedSecretVersionsCache, upstream.Generation(), upstream.Status().Conditions(), upstream.Name(), currentSecretVersion, config) {
|
||||
func validateAndSetLDAPServerConnectivityAndSearchBase(ctx context.Context, validatedSecretVersionsCache SecretVersionCacheI, upstream UpstreamGenericLDAPIDP, config *upstreamldap.ProviderConfig, currentSecretVersion string) (*v1alpha1.Condition, *v1alpha1.Condition) {
|
||||
// previouslyValidatedSecretVersion := validatedSecretVersionsCache.ValidatedSettingsByName[upstream.Name()].BindSecretResourceVersion
|
||||
// doesn't have an existing entry for ValidatedSettingsByName with this secret version ->
|
||||
// lets double check tls connection
|
||||
// if we can connect, put it in the secret cache
|
||||
// also we KNOW we need to recheck the search base stuff too... so they should all be one function?
|
||||
// but if tls validation fails no need to also try to get search base stuff?
|
||||
|
||||
validatedSettings, hasPreviousValidatedSettings := validatedSecretVersionsCache.Get(upstream.Name(), currentSecretVersion, upstream.Generation())
|
||||
var ldapConnectionValidCondition, searchBaseFoundCondition *v1alpha1.Condition
|
||||
if !hasPreviousValidatedSettings {
|
||||
testConnectionTimeout, cancelFunc := context.WithTimeout(ctx, probeLDAPTimeout)
|
||||
defer cancelFunc()
|
||||
|
||||
ldapConnectionValidCondition = TestConnection(testConnectionTimeout, upstream.Spec().BindSecretName(), config, currentSecretVersion)
|
||||
|
||||
if ldapConnectionValidCondition.Status == v1alpha1.ConditionTrue {
|
||||
// Remember (in-memory for this pod) that the controller has successfully validated the LDAP provider
|
||||
// using this version of the Secret. This is for performance reasons, to avoid attempting to connect to
|
||||
// the LDAP server more than is needed. If the pod restarts, it will attempt this validation again.
|
||||
validatedSecretVersionsCache.ValidatedSettingsByName[upstream.Name()] = ValidatedSettings{
|
||||
BindSecretResourceVersion: currentSecretVersion,
|
||||
LDAPConnectionProtocol: config.ConnectionProtocol,
|
||||
}
|
||||
}
|
||||
}
|
||||
var searchBaseFoundCondition *v1alpha1.Condition
|
||||
if !HasPreviousSuccessfulSearchBaseConditionForCurrentGeneration(validatedSecretVersionsCache, upstream.Generation(), upstream.Status().Conditions(), upstream.Name(), currentSecretVersion, config) {
|
||||
searchBaseTimeout, cancelFunc := context.WithTimeout(ctx, probeLDAPTimeout)
|
||||
defer cancelFunc()
|
||||
|
||||
searchBaseFoundCondition = upstream.Spec().DetectAndSetSearchBase(searchBaseTimeout, config)
|
||||
|
||||
validatedSettings := validatedSecretVersionsCache.ValidatedSettingsByName[upstream.Name()]
|
||||
validatedSettings.GroupSearchBase = config.GroupSearch.Base
|
||||
validatedSettings.UserSearchBase = config.UserSearch.Base
|
||||
validatedSecretVersionsCache.ValidatedSettingsByName[upstream.Name()] = validatedSettings
|
||||
if ldapConnectionValidCondition.Status == v1alpha1.ConditionTrue {
|
||||
// if it's nil, don't worry about the search base condition. But if it exists make sure the status is true.
|
||||
if searchBaseFoundCondition == nil || (searchBaseFoundCondition.Status == v1alpha1.ConditionTrue) {
|
||||
// Remember (in-memory for this pod) that the controller has successfully validated the LDAP provider
|
||||
// using this version of the Secret. This is for performance reasons, to avoid attempting to connect to
|
||||
// the LDAP server more than is needed. If the pod restarts, it will attempt this validation again.
|
||||
validatedSettings.LDAPConnectionProtocol = config.ConnectionProtocol
|
||||
validatedSettings.BindSecretResourceVersion = currentSecretVersion
|
||||
validatedSettings.Generation = upstream.Generation()
|
||||
validatedSettings.UserSearchBase = config.UserSearch.Base
|
||||
validatedSettings.GroupSearchBase = config.GroupSearch.Base
|
||||
validatedSecretVersionsCache.Set(upstream.Name(), currentSecretVersion, upstream.Generation(), validatedSettings)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
config.ConnectionProtocol = validatedSettings.LDAPConnectionProtocol
|
||||
config.UserSearch.Base = validatedSettings.UserSearchBase
|
||||
config.GroupSearch.Base = validatedSettings.GroupSearchBase
|
||||
}
|
||||
|
||||
return ldapConnectionValidCondition, searchBaseFoundCondition
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1"
|
||||
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
)
|
||||
|
||||
@@ -28,6 +29,10 @@ func (c *clientWrapper) CoreV1() corev1client.CoreV1Interface {
|
||||
return &coreWrapper{CoreV1Interface: c.Interface.CoreV1(), opts: c.opts}
|
||||
}
|
||||
|
||||
func (c *clientWrapper) AppsV1() appsv1client.AppsV1Interface {
|
||||
return &appsWrapper{AppsV1Interface: c.Interface.AppsV1(), opts: c.opts}
|
||||
}
|
||||
|
||||
type coreWrapper struct {
|
||||
corev1client.CoreV1Interface
|
||||
opts *[]metav1.DeleteOptions
|
||||
@@ -41,6 +46,15 @@ func (c *coreWrapper) Secrets(namespace string) corev1client.SecretInterface {
|
||||
return &secretsWrapper{SecretInterface: c.CoreV1Interface.Secrets(namespace), opts: c.opts}
|
||||
}
|
||||
|
||||
type appsWrapper struct {
|
||||
appsv1client.AppsV1Interface
|
||||
opts *[]metav1.DeleteOptions
|
||||
}
|
||||
|
||||
func (c *appsWrapper) Deployments(namespace string) appsv1client.DeploymentInterface {
|
||||
return &deploymentsWrapper{DeploymentInterface: c.AppsV1Interface.Deployments(namespace), opts: c.opts}
|
||||
}
|
||||
|
||||
type podsWrapper struct {
|
||||
corev1client.PodInterface
|
||||
opts *[]metav1.DeleteOptions
|
||||
@@ -61,6 +75,16 @@ func (s *secretsWrapper) Delete(ctx context.Context, name string, opts metav1.De
|
||||
return s.SecretInterface.Delete(ctx, name, opts)
|
||||
}
|
||||
|
||||
type deploymentsWrapper struct {
|
||||
appsv1client.DeploymentInterface
|
||||
opts *[]metav1.DeleteOptions
|
||||
}
|
||||
|
||||
func (s *deploymentsWrapper) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
|
||||
*s.opts = append(*s.opts, opts)
|
||||
return s.DeploymentInterface.Delete(ctx, name, opts)
|
||||
}
|
||||
|
||||
func NewPreconditions(uid types.UID, rv string) metav1.DeleteOptions {
|
||||
return metav1.DeleteOptions{
|
||||
Preconditions: &metav1.Preconditions{
|
||||
|
||||
@@ -7,7 +7,7 @@ params:
|
||||
github_url: "https://github.com/vmware-tanzu/pinniped"
|
||||
slack_url: "https://kubernetes.slack.com/messages/pinniped"
|
||||
community_url: "https://go.pinniped.dev/community"
|
||||
latest_version: v0.10.0
|
||||
latest_version: v0.11.0
|
||||
pygmentsCodefences: true
|
||||
pygmentsStyle: "pygments"
|
||||
markup:
|
||||
|
||||
@@ -10,6 +10,147 @@ menu:
|
||||
parent: reference
|
||||
---
|
||||
|
||||
## pinniped completion bash
|
||||
|
||||
generate the autocompletion script for bash
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Generate the autocompletion script for the bash shell.
|
||||
|
||||
This script depends on the 'bash-completion' package.
|
||||
If it is not installed already, you can install it via your OS's package manager.
|
||||
|
||||
To load completions in your current shell session:
|
||||
$ source <(pinniped completion bash)
|
||||
|
||||
To load completions for every new session, execute once:
|
||||
Linux:
|
||||
$ pinniped completion bash > /etc/bash_completion.d/pinniped
|
||||
MacOS:
|
||||
$ pinniped completion bash > /usr/local/etc/bash_completion.d/pinniped
|
||||
|
||||
You will need to start a new shell for this setup to take effect.
|
||||
|
||||
|
||||
```
|
||||
pinniped completion bash
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for bash
|
||||
--no-descriptions disable completion descriptions
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [pinniped completion]() - generate the autocompletion script for the specified shell
|
||||
|
||||
## pinniped completion fish
|
||||
|
||||
generate the autocompletion script for fish
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Generate the autocompletion script for the fish shell.
|
||||
|
||||
To load completions in your current shell session:
|
||||
$ pinniped completion fish | source
|
||||
|
||||
To load completions for every new session, execute once:
|
||||
$ pinniped completion fish > ~/.config/fish/completions/pinniped.fish
|
||||
|
||||
You will need to start a new shell for this setup to take effect.
|
||||
|
||||
|
||||
```
|
||||
pinniped completion fish [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for fish
|
||||
--no-descriptions disable completion descriptions
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [pinniped completion]() - generate the autocompletion script for the specified shell
|
||||
|
||||
## pinniped completion powershell
|
||||
|
||||
generate the autocompletion script for powershell
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Generate the autocompletion script for powershell.
|
||||
|
||||
To load completions in your current shell session:
|
||||
PS C:\> pinniped completion powershell | Out-String | Invoke-Expression
|
||||
|
||||
To load completions for every new session, add the output of the above command
|
||||
to your powershell profile.
|
||||
|
||||
|
||||
```
|
||||
pinniped completion powershell [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for powershell
|
||||
--no-descriptions disable completion descriptions
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [pinniped completion]() - generate the autocompletion script for the specified shell
|
||||
|
||||
## pinniped completion zsh
|
||||
|
||||
generate the autocompletion script for zsh
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Generate the autocompletion script for the zsh shell.
|
||||
|
||||
If shell completion is not already enabled in your environment you will need
|
||||
to enable it. You can execute the following once:
|
||||
|
||||
$ echo "autoload -U compinit; compinit" >> ~/.zshrc
|
||||
|
||||
To load completions for every new session, execute once:
|
||||
# Linux:
|
||||
$ pinniped completion zsh > "${fpath[1]}/_pinniped"
|
||||
# macOS:
|
||||
$ pinniped completion zsh > /usr/local/share/zsh/site-functions/_pinniped
|
||||
|
||||
You will need to start a new shell for this setup to take effect.
|
||||
|
||||
|
||||
```
|
||||
pinniped completion zsh [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for zsh
|
||||
--no-descriptions disable completion descriptions
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [pinniped completion]() - generate the autocompletion script for the specified shell
|
||||
|
||||
## pinniped get kubeconfig
|
||||
|
||||
Generate a Pinniped-based kubeconfig for a cluster
|
||||
@@ -48,8 +189,9 @@ pinniped get kubeconfig [flags]
|
||||
--static-token string Instead of doing an OIDC-based login, specify a static token
|
||||
--static-token-env string Instead of doing an OIDC-based login, read a static token from the environment
|
||||
--timeout duration Timeout for autodiscovery and validation (default 10m0s)
|
||||
--upstream-identity-provider-flow string The type of client flow to use with the upstream identity provider during login with a Supervisor (e.g. 'cli_password', 'browser_authcode')
|
||||
--upstream-identity-provider-name string The name of the upstream identity provider used during login with a Supervisor
|
||||
--upstream-identity-provider-type string The type of the upstream identity provider used during login with a Supervisor (e.g. 'oidc', 'ldap')
|
||||
--upstream-identity-provider-type string The type of the upstream identity provider used during login with a Supervisor (e.g. 'oidc', 'ldap', 'activedirectory')
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
115
site/content/posts/2021-08-27-supporting-ad-oidc-workflows.md
Normal file
@@ -0,0 +1,115 @@
|
||||
---
|
||||
title: "Pinniped v0.11.0: Easy Configurations for Active Directory, OIDC CLI workflows and more"
|
||||
slug: supporting-ad-oidc-workflows
|
||||
date: 2021-08-31
|
||||
author: Anjali Telang
|
||||
image: https://images.unsplash.com/photo-1574090695368-bac29418e5dc?ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&ixlib=rb-1.2.1&auto=format&fit=crop&w=1350&q=80
|
||||
excerpt: "With the release of v0.11.0, Pinniped offers CRDs for easy Active Directory configuration, OIDC password grant flow for CLI workflows, and Distroless images for security and performance"
|
||||
tags: ['Margo Crawford','Ryan Richard', 'Anjali Telang', 'release']
|
||||
---
|
||||
|
||||

|
||||
*Photo by [Eelco van der Wal](https://unsplash.com/@eelcovdwal) on [Unsplash](https://unsplash.com/s/photos/seal)*
|
||||
|
||||
## CRDs for easy Active Directory Configuration!
|
||||
|
||||
Microsoft Active Directory (AD) is one of the most popular and widely used Identity Providers. Active Directory Domain Services (AD DS) is the foundation of every Windows domain network. It stores information about members of the domain, including devices and users, verifies their credentials, and defines their access rights. While AD is widely used in legacy systems, configuring Active Directory has been somewhat of a challenge in cloud native environments.
|
||||
|
||||
In our previous post on LDAP, we mentioned that the reason to support LDAP and AD was primarily to help the cluster administrator easily manage and configure these Identity Providers using Kubernetes APIs. Some of the available identity shims, such as Dex and UAA, can be used between Pinniped and the Identity Providers, but they are difficult to configure, and the cluster administrator may not be able to manage their Day 2 operations using Kubernetes APIs.
|
||||
|
||||
Our initial LDAP implementation, released with v0.10.0, can be used with any LDAP-based Identity Provider, including Active Directory, but with this release we provide APIs that are specifically tailored to Active Directory configuration.
|
||||
|
||||
### Set up and use AD with your Supervisor
|
||||
|
||||
The Pinniped Supervisor authenticates your users with the AD provider via the LDAP protocol, and then issues unique, short-lived, per-cluster tokens. Our previous blog post on [LDAP configuration]({{< ref "2021-06-02-first-ldap-release.md">}}) elaborates on the security considerations for supporting this integration at the Pinniped Supervisor level instead of at the Concierge.
|
||||
|
||||
To set up the AD configuration, once you have [installed the Pinniped Supervisor]({{< ref "docs/howto/install-supervisor.md" >}}) with ingress configured and have [configured a FederationDomain]({{< ref "docs/howto/configure-supervisor" >}}) to issue tokens for your downstream clusters, you can create an [ActiveDirectoryIdentityProvider](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#activedirectoryidentityprovider) in the same namespace as the Supervisor.
|
||||
Here’s what an example configuration looks like:
|
||||
|
||||
```yaml
|
||||
apiVersion: idp.supervisor.pinniped.dev/v1alpha1
|
||||
kind: ActiveDirectoryIdentityProvider
|
||||
metadata:
|
||||
name: my-active-directory-idp
|
||||
namespace: pinniped-supervisor
|
||||
spec:
|
||||
|
||||
# Specify the host of the Active Directory server.
|
||||
host: "activedirectory.example.com:636"
|
||||
|
||||
# Specify the name of the Kubernetes Secret that contains your Active Directory
|
||||
# bind account credentials. This service account will be used by the
|
||||
# Supervisor to perform LDAP user and group searches.
|
||||
bind:
|
||||
secretName: "active-directory-bind-account"
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: active-directory-bind-account
|
||||
namespace: pinniped-supervisor
|
||||
type: kubernetes.io/basic-auth
|
||||
stringData:
|
||||
|
||||
# The dn (distinguished name) of your Active Directory bind account.
|
||||
username: "CN=Bind User,OU=Users,DC=activedirectory,DC=example,dc=com"
|
||||
|
||||
# The password of your Active Directory bind account.
|
||||
password: "YOUR_PASSWORD"
|
||||
```
|
||||
|
||||
You can also customize the `userSearch` and `groupSearch` settings, as shown in the examples in our reference documentation [here]({{< ref "docs/howto/configure-supervisor-with-activedirectory.md" >}}).
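For illustration, a customized configuration might look roughly like the following sketch. The structure mirrors the ActiveDirectoryIdentityProvider API linked above, but the search base values are hypothetical placeholders for your own directory layout, so check the reference documentation for the exact fields supported by your Pinniped version.

```yaml
apiVersion: idp.supervisor.pinniped.dev/v1alpha1
kind: ActiveDirectoryIdentityProvider
metadata:
  name: my-active-directory-idp
  namespace: pinniped-supervisor
spec:
  host: "activedirectory.example.com:636"
  bind:
    secretName: "active-directory-bind-account"

  # Optionally search for users under a specific OU instead of the
  # domain's default naming context.
  userSearch:
    base: "OU=Employees,DC=activedirectory,DC=example,DC=com"

  # Optionally search for groups under a specific OU as well.
  groupSearch:
    base: "OU=Groups,DC=activedirectory,DC=example,DC=com"
```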
|
||||
|
||||
In the above example, users will be able to log in with either their sAMAccountName (e.g. pinny), their userPrincipalName (e.g. pinny@example.com), or their mail attribute. This reduces the need to tell users what specific value from AD must be provided in the username field. Regardless of what value the user provides in the username field, the userPrincipalName will be used as the identity in Kubernetes clusters. The UPN is used as the username attribute by default because it is unique within an AD forest. Similarly, a UPN is generated for each group using its sAMAccountName attribute and the AD domain hostname. The default AD configuration finds both direct and nested groups.
|
||||
|
||||
After logging in, running the `pinniped whoami` command displays:
|
||||
```
|
||||
Current cluster info:
|
||||
|
||||
Name: cluster-name
|
||||
URL: https://cluster.example.com
|
||||
|
||||
Current user info:
|
||||
|
||||
Username: pinny@example.com
|
||||
Groups: Mammals@example.com, Marine Mammals@example.com, system:authenticated
|
||||
```
|
||||
|
||||
## OIDC CLI-based workflows
|
||||
|
||||
In v0.10.0 we included support for non-interactive, password-based LDAP logins to support CI/CD workflows. In this release, we extend the same capability to OIDC logins by using the OIDC password grant. If the OIDC provider supports the OAuth 2.0 resource owner password credentials grant, then you may optionally set `allowPasswordGrant` to `true` to allow clients to perform this type of authentication. Clients will be prompted for their username and password on the command line without opening a browser window.
|
||||
It is important to note that the [resource owner password credentials grant](https://datatracker.ietf.org/doc/html/rfc6749#section-4.3) from OAuth 2.0 is generally considered unsafe and should only be used when there is a trust relationship between the client and the resource owner, because it exposes the resource owner's credentials to the client. Refer to the security best practices [here](https://datatracker.ietf.org/doc/html/rfc6749#section-4.3). However, it can be useful for use cases such as CI/CD, where you may be authenticating to the Kubernetes cluster using an OIDC service account.
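As a rough sketch of how this is enabled, an OIDCIdentityProvider might be configured like the following; the issuer, client secret name, and the exact placement of `allowPasswordGrant` under `authorizationConfig` are assumptions here, so confirm them against the OIDCIdentityProvider reference for your Pinniped version.

```yaml
apiVersion: idp.supervisor.pinniped.dev/v1alpha1
kind: OIDCIdentityProvider
metadata:
  name: my-oidc-idp
  namespace: pinniped-supervisor
spec:
  issuer: "https://oidc-provider.example.com/issuer"
  client:
    secretName: "my-oidc-client-secret"
  authorizationConfig:
    # Allow CLI clients to send the user's username and password,
    # which the Supervisor exchanges with the OIDC provider using the
    # resource owner password credentials grant.
    allowPasswordGrant: true
```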
|
||||
|
||||
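
As a rough sketch (the issuer, names, and exact field placement below are illustrative, so check the OIDCIdentityProvider reference documentation for the authoritative schema), enabling the password grant for an upstream OIDC provider might look like this:

```yaml
apiVersion: idp.supervisor.pinniped.dev/v1alpha1
kind: OIDCIdentityProvider
metadata:
  name: my-oidc-idp
  namespace: pinniped-supervisor
spec:
  # Issuer URL of the upstream OIDC provider (example value).
  issuer: https://oidc.example.com/issuer
  authorizationConfig:
    # Opt in to the resource owner password credentials grant.
    # Only do this when the provider supports it and MFA is not required.
    allowPasswordGrant: true
  client:
    # Name of a Secret holding the OIDC client credentials (example value).
    secretName: my-oidc-client-credentials
```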

### How this works with Pinniped

A few considerations while configuring this on the cluster:

* Confirm that multi-factor authentication is not intended to be used on the cluster.
* The Pinniped CLI running on your workstation and the Pinniped Supervisor backend are trusted to handle your password.

With the new functionality, users run `pinniped get kubeconfig` with a new argument, `--upstream-identity-provider-flow="cli_password"`, to indicate their intent to use the password grant flow when logging in to the upstream OIDC provider. If no argument is specified, the browser-based flow is used by default. This way, older Pinniped CLI versions will default to browser-based authentication, and the default for older Supervisor versions used with newer CLI versions will also be browser-based authentication.
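
For example, a CI job might generate and use a kubeconfig with this flow enabled (the output file name below is just a placeholder, and the exact set of additional flags you need depends on your cluster):

```sh
# Generate a kubeconfig whose OIDC login happens on the command line
# instead of in a browser window.
pinniped get kubeconfig \
  --upstream-identity-provider-flow="cli_password" \
  > ci-kubeconfig.yaml

# Later, kubectl commands that use this kubeconfig will prompt for the
# username and password of the upstream OIDC account.
kubectl --kubeconfig ci-kubeconfig.yaml get namespaces
```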

## Distroless-based container images

In this release, we moved our base container images from Debian to distroless images. This yields much smaller images, which are faster to pull and start, and it also improves security by removing dependencies on system libraries that may contain vulnerabilities.

Refer to the [release notes for v0.11.0](https://github.com/vmware-tanzu/pinniped/releases/tag/v0.11.0) for a complete list of fixes and features included in the release.

## Tell us about your configuration and use cases!

We invite your suggestions and contributions to make Pinniped work for your configuration and use cases.

The Pinniped community is a vital part of the project's success. This release includes important feedback from community user [Scott Rosenberg](https://github.com/vrabbi), who helped us better understand Active Directory configurations and provided valuable feedback for the OIDC password grant feature. Thank you for helping improve Pinniped!

We thrive on community feedback.
[Are you using Pinniped?](https://github.com/vmware-tanzu/pinniped/discussions/152)
Did you try our new LDAP or AD features?
What other configurations do you need for authenticating users to your Kubernetes clusters?

Find us in [#pinniped](https://kubernetes.slack.com/archives/C01BW364RJA) on Kubernetes Slack,
[create an issue](https://github.com/vmware-tanzu/pinniped/issues/new/choose) on our GitHub repository,
or start a [Discussion](https://github.com/vmware-tanzu/pinniped/discussions).

{{< community >}}

@@ -9,13 +9,6 @@
      <p class="position">Engineer</p>
    </div>
  </div>
  <div class="bio">
    <div class="image"><img src="/img/matt-moyer.png" /></div>
    <div class="info">
      <p class="name">Matt Moyer</p>
      <p class="position">Engineer</p>
    </div>
  </div>
  <div class="bio">
    <div class="image"><img src="/img/mo-khan.png" /></div>
    <div class="info">

Binary file not shown.
Before Width: | Height: | Size: 21 KiB

@@ -32,7 +32,7 @@ func TestKubeCertAgent(t *testing.T) {
		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
		defer cancel()
		agentPods, err := kubeClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx, metav1.ListOptions{
			LabelSelector: "kube-cert-agent.pinniped.dev=v2",
			LabelSelector: "kube-cert-agent.pinniped.dev=v3",
		})
		if err != nil {
			return false, fmt.Errorf("failed to list pods: %w", err)

@@ -23,6 +23,7 @@ import (
	"github.com/stretchr/testify/require"
	"golang.org/x/oauth2"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/config/v1alpha1"
	idpv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/idp/v1alpha1"

@@ -327,6 +328,188 @@ func TestSupervisorLogin(t *testing.T) {
			wantErrorDescription: "The resource owner or authorization server denied the request. Username/password not accepted by LDAP provider.",
			wantErrorType: "access_denied",
		},
		{
			name: "ldap login still works after updating bind secret",
			maybeSkip: func(t *testing.T) {
				t.Helper()
				if len(env.ToolsNamespace) == 0 && !env.HasCapability(testlib.CanReachInternetLDAPPorts) {
					t.Skip("LDAP integration test requires connectivity to an LDAP server")
				}
			},
			createIDP: func(t *testing.T) {
				t.Helper()

				secret := testlib.CreateTestSecret(t, env.SupervisorNamespace, "ldap-service-account", v1.SecretTypeBasicAuth,
					map[string]string{
						v1.BasicAuthUsernameKey: env.SupervisorUpstreamLDAP.BindUsername,
						v1.BasicAuthPasswordKey: env.SupervisorUpstreamLDAP.BindPassword,
					},
				)
				secretName := secret.Name
				ldapIDP := testlib.CreateTestLDAPIdentityProvider(t, idpv1alpha1.LDAPIdentityProviderSpec{
					Host: env.SupervisorUpstreamLDAP.Host,
					TLS: &idpv1alpha1.TLSSpec{
						CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(env.SupervisorUpstreamLDAP.CABundle)),
					},
					Bind: idpv1alpha1.LDAPIdentityProviderBind{
						SecretName: secretName,
					},
					UserSearch: idpv1alpha1.LDAPIdentityProviderUserSearch{
						Base: env.SupervisorUpstreamLDAP.UserSearchBase,
						Filter: "",
						Attributes: idpv1alpha1.LDAPIdentityProviderUserSearchAttributes{
							Username: env.SupervisorUpstreamLDAP.TestUserMailAttributeName,
							UID: env.SupervisorUpstreamLDAP.TestUserUniqueIDAttributeName,
						},
					},
					GroupSearch: idpv1alpha1.LDAPIdentityProviderGroupSearch{
						Base: env.SupervisorUpstreamLDAP.GroupSearchBase,
						Filter: "",
						Attributes: idpv1alpha1.LDAPIdentityProviderGroupSearchAttributes{
							GroupName: "dn",
						},
					},
				}, idpv1alpha1.LDAPPhaseReady)

				secret.Annotations = map[string]string{"pinniped.dev/test": "", "another-label": "another-key"}
				// update that secret, which will cause the cache to recheck tls and search base values
				client := testlib.NewKubernetesClientset(t)
				ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
				defer cancel()
				updatedSecret, err := client.CoreV1().Secrets(env.SupervisorNamespace).Update(ctx, secret, metav1.UpdateOptions{})
				require.NoError(t, err)

				expectedMsg := fmt.Sprintf(
					`successfully able to connect to "%s" and bind as user "%s" [validated with Secret "%s" at version "%s"]`,
					env.SupervisorUpstreamLDAP.Host, env.SupervisorUpstreamLDAP.BindUsername,
					updatedSecret.Name, updatedSecret.ResourceVersion,
				)
				supervisorClient := testlib.NewSupervisorClientset(t)
				testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
					ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
					defer cancel()
					ldapIDP, err = supervisorClient.IDPV1alpha1().LDAPIdentityProviders(env.SupervisorNamespace).Get(ctx, ldapIDP.Name, metav1.GetOptions{})
					requireEventually.NoError(err)
					requireEventuallySuccessfulLDAPIdentityProviderConditions(t, requireEventually, ldapIDP, expectedMsg)
				}, time.Minute, 500*time.Millisecond)
			},
			requestAuthorization: func(t *testing.T, downstreamAuthorizeURL, _ string, httpClient *http.Client) {
				requestAuthorizationUsingCLIPasswordFlow(t,
					downstreamAuthorizeURL,
					env.SupervisorUpstreamLDAP.TestUserMailAttributeValue, // username to present to server during login
					env.SupervisorUpstreamLDAP.TestUserPassword, // password to present to server during login
					httpClient,
					false,
				)
			},
			// the ID token Subject should be the Host URL plus the value pulled from the requested UserSearch.Attributes.UID attribute
			wantDownstreamIDTokenSubjectToMatch: "^" + regexp.QuoteMeta(
				"ldaps://"+env.SupervisorUpstreamLDAP.Host+
					"?base="+url.QueryEscape(env.SupervisorUpstreamLDAP.UserSearchBase)+
					"&sub="+base64.RawURLEncoding.EncodeToString([]byte(env.SupervisorUpstreamLDAP.TestUserUniqueIDAttributeValue)),
			) + "$",
			// the ID token Username should have been pulled from the requested UserSearch.Attributes.Username attribute
			wantDownstreamIDTokenUsernameToMatch: "^" + regexp.QuoteMeta(env.SupervisorUpstreamLDAP.TestUserMailAttributeValue) + "$",
			wantDownstreamIDTokenGroups: env.SupervisorUpstreamLDAP.TestUserDirectGroupsDNs,
		},
		{
			name: "ldap login still works after deleting and recreating the bind secret",
			maybeSkip: func(t *testing.T) {
				t.Helper()
				if len(env.ToolsNamespace) == 0 && !env.HasCapability(testlib.CanReachInternetLDAPPorts) {
					t.Skip("LDAP integration test requires connectivity to an LDAP server")
				}
			},
			createIDP: func(t *testing.T) {
				t.Helper()

				secret := testlib.CreateTestSecret(t, env.SupervisorNamespace, "ldap-service-account", v1.SecretTypeBasicAuth,
					map[string]string{
						v1.BasicAuthUsernameKey: env.SupervisorUpstreamLDAP.BindUsername,
						v1.BasicAuthPasswordKey: env.SupervisorUpstreamLDAP.BindPassword,
					},
				)
				secretName := secret.Name
				ldapIDP := testlib.CreateTestLDAPIdentityProvider(t, idpv1alpha1.LDAPIdentityProviderSpec{
					Host: env.SupervisorUpstreamLDAP.Host,
					TLS: &idpv1alpha1.TLSSpec{
						CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(env.SupervisorUpstreamLDAP.CABundle)),
					},
					Bind: idpv1alpha1.LDAPIdentityProviderBind{
						SecretName: secretName,
					},
					UserSearch: idpv1alpha1.LDAPIdentityProviderUserSearch{
						Base: env.SupervisorUpstreamLDAP.UserSearchBase,
						Filter: "",
						Attributes: idpv1alpha1.LDAPIdentityProviderUserSearchAttributes{
							Username: env.SupervisorUpstreamLDAP.TestUserMailAttributeName,
							UID: env.SupervisorUpstreamLDAP.TestUserUniqueIDAttributeName,
						},
					},
					GroupSearch: idpv1alpha1.LDAPIdentityProviderGroupSearch{
						Base: env.SupervisorUpstreamLDAP.GroupSearchBase,
						Filter: "",
						Attributes: idpv1alpha1.LDAPIdentityProviderGroupSearchAttributes{
							GroupName: "dn",
						},
					},
				}, idpv1alpha1.LDAPPhaseReady)

				// delete, then recreate that secret, which will cause the cache to recheck tls and search base values
				client := testlib.NewKubernetesClientset(t)
				deleteCtx, deleteCancel := context.WithTimeout(context.Background(), time.Minute)
				defer deleteCancel()
				err := client.CoreV1().Secrets(env.SupervisorNamespace).Delete(deleteCtx, secretName, metav1.DeleteOptions{})
				require.NoError(t, err)

				// create the secret again
				recreateCtx, recreateCancel := context.WithTimeout(context.Background(), time.Minute)
				defer recreateCancel()
				recreatedSecret, err := client.CoreV1().Secrets(env.SupervisorNamespace).Create(recreateCtx, &v1.Secret{
					ObjectMeta: metav1.ObjectMeta{
						Name: secretName,
						Namespace: env.SupervisorNamespace,
					},
					Type: v1.SecretTypeBasicAuth,
					StringData: map[string]string{
						v1.BasicAuthUsernameKey: env.SupervisorUpstreamLDAP.BindUsername,
						v1.BasicAuthPasswordKey: env.SupervisorUpstreamLDAP.BindPassword,
					},
				}, metav1.CreateOptions{})
				require.NoError(t, err)
				expectedMsg := fmt.Sprintf(
					`successfully able to connect to "%s" and bind as user "%s" [validated with Secret "%s" at version "%s"]`,
					env.SupervisorUpstreamLDAP.Host, env.SupervisorUpstreamLDAP.BindUsername,
					recreatedSecret.Name, recreatedSecret.ResourceVersion,
				)
				supervisorClient := testlib.NewSupervisorClientset(t)
				testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
					ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
					defer cancel()
					ldapIDP, err = supervisorClient.IDPV1alpha1().LDAPIdentityProviders(env.SupervisorNamespace).Get(ctx, ldapIDP.Name, metav1.GetOptions{})
					requireEventually.NoError(err)
					requireEventuallySuccessfulLDAPIdentityProviderConditions(t, requireEventually, ldapIDP, expectedMsg)
				}, time.Minute, 500*time.Millisecond)
			},
			requestAuthorization: func(t *testing.T, downstreamAuthorizeURL, _ string, httpClient *http.Client) {
				requestAuthorizationUsingCLIPasswordFlow(t,
					downstreamAuthorizeURL,
					env.SupervisorUpstreamLDAP.TestUserMailAttributeValue, // username to present to server during login
					env.SupervisorUpstreamLDAP.TestUserPassword, // password to present to server during login
					httpClient,
					false,
				)
			},
			// the ID token Subject should be the Host URL plus the value pulled from the requested UserSearch.Attributes.UID attribute
			wantDownstreamIDTokenSubjectToMatch: "^" + regexp.QuoteMeta(
				"ldaps://"+env.SupervisorUpstreamLDAP.Host+
					"?base="+url.QueryEscape(env.SupervisorUpstreamLDAP.UserSearchBase)+
					"&sub="+base64.RawURLEncoding.EncodeToString([]byte(env.SupervisorUpstreamLDAP.TestUserUniqueIDAttributeValue)),
			) + "$",
			// the ID token Username should have been pulled from the requested UserSearch.Attributes.Username attribute
			wantDownstreamIDTokenUsernameToMatch: "^" + regexp.QuoteMeta(env.SupervisorUpstreamLDAP.TestUserMailAttributeValue) + "$",
			wantDownstreamIDTokenGroups: env.SupervisorUpstreamLDAP.TestUserDirectGroupsDNs,
		},
		{
			name: "activedirectory with all default options",
			maybeSkip: func(t *testing.T) {

@@ -448,6 +631,165 @@ func TestSupervisorLogin(t *testing.T) {
			wantDownstreamIDTokenUsernameToMatch: "^" + regexp.QuoteMeta(env.SupervisorUpstreamActiveDirectory.TestUserMailAttributeValue) + "$",
			wantDownstreamIDTokenGroups: env.SupervisorUpstreamActiveDirectory.TestUserDirectGroupsDNs,
		},
		{
			name: "active directory login still works after updating bind secret",
			maybeSkip: func(t *testing.T) {
				t.Helper()
				if len(env.ToolsNamespace) == 0 && !env.HasCapability(testlib.CanReachInternetLDAPPorts) {
					t.Skip("LDAP integration test requires connectivity to an LDAP server")
				}
				if env.SupervisorUpstreamActiveDirectory.Host == "" {
					t.Skip("Active Directory hostname not specified")
				}
			},
			createIDP: func(t *testing.T) {
				t.Helper()

				secret := testlib.CreateTestSecret(t, env.SupervisorNamespace, "ad-service-account", v1.SecretTypeBasicAuth,
					map[string]string{
						v1.BasicAuthUsernameKey: env.SupervisorUpstreamActiveDirectory.BindUsername,
						v1.BasicAuthPasswordKey: env.SupervisorUpstreamActiveDirectory.BindPassword,
					},
				)
				secretName := secret.Name
				adIDP := testlib.CreateTestActiveDirectoryIdentityProvider(t, idpv1alpha1.ActiveDirectoryIdentityProviderSpec{
					Host: env.SupervisorUpstreamActiveDirectory.Host,
					TLS: &idpv1alpha1.TLSSpec{
						CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(env.SupervisorUpstreamActiveDirectory.CABundle)),
					},
					Bind: idpv1alpha1.ActiveDirectoryIdentityProviderBind{
						SecretName: secretName,
					},
				}, idpv1alpha1.ActiveDirectoryPhaseReady)

				secret.Annotations = map[string]string{"pinniped.dev/test": "", "another-label": "another-key"}
				// update that secret, which will cause the cache to recheck tls and search base values
				client := testlib.NewKubernetesClientset(t)
				ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
				defer cancel()
				updatedSecret, err := client.CoreV1().Secrets(env.SupervisorNamespace).Update(ctx, secret, metav1.UpdateOptions{})
				require.NoError(t, err)

				expectedMsg := fmt.Sprintf(
					`successfully able to connect to "%s" and bind as user "%s" [validated with Secret "%s" at version "%s"]`,
					env.SupervisorUpstreamActiveDirectory.Host, env.SupervisorUpstreamActiveDirectory.BindUsername,
					updatedSecret.Name, updatedSecret.ResourceVersion,
				)
				supervisorClient := testlib.NewSupervisorClientset(t)
				testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
					ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
					defer cancel()
					adIDP, err = supervisorClient.IDPV1alpha1().ActiveDirectoryIdentityProviders(env.SupervisorNamespace).Get(ctx, adIDP.Name, metav1.GetOptions{})
					requireEventually.NoError(err)
					requireEventuallySuccessfulActiveDirectoryIdentityProviderConditions(t, requireEventually, adIDP, expectedMsg)
				}, time.Minute, 500*time.Millisecond)
			},
			requestAuthorization: func(t *testing.T, downstreamAuthorizeURL, _ string, httpClient *http.Client) {
				requestAuthorizationUsingCLIPasswordFlow(t,
					downstreamAuthorizeURL,
					env.SupervisorUpstreamActiveDirectory.TestUserPrincipalNameValue, // username to present to server during login
					env.SupervisorUpstreamActiveDirectory.TestUserPassword, // password to present to server during login
					httpClient,
					false,
				)
			},
			// the ID token Subject should be the Host URL plus the value pulled from the requested UserSearch.Attributes.UID attribute
			wantDownstreamIDTokenSubjectToMatch: "^" + regexp.QuoteMeta(
				"ldaps://"+env.SupervisorUpstreamActiveDirectory.Host+
					"?base="+url.QueryEscape(env.SupervisorUpstreamActiveDirectory.DefaultNamingContextSearchBase)+
					"&sub="+env.SupervisorUpstreamActiveDirectory.TestUserUniqueIDAttributeValue,
			) + "$",
			// the ID token Username should have been pulled from the requested UserSearch.Attributes.Username attribute
			wantDownstreamIDTokenUsernameToMatch: "^" + regexp.QuoteMeta(env.SupervisorUpstreamActiveDirectory.TestUserPrincipalNameValue) + "$",
			wantDownstreamIDTokenGroups: env.SupervisorUpstreamActiveDirectory.TestUserIndirectGroupsSAMAccountPlusDomainNames,
		},
		{
			name: "active directory login still works after deleting and recreating bind secret",
			maybeSkip: func(t *testing.T) {
				t.Helper()
				if len(env.ToolsNamespace) == 0 && !env.HasCapability(testlib.CanReachInternetLDAPPorts) {
					t.Skip("LDAP integration test requires connectivity to an LDAP server")
				}
				if env.SupervisorUpstreamActiveDirectory.Host == "" {
					t.Skip("Active Directory hostname not specified")
				}
			},
			createIDP: func(t *testing.T) {
				t.Helper()

				secret := testlib.CreateTestSecret(t, env.SupervisorNamespace, "ad-service-account", v1.SecretTypeBasicAuth,
					map[string]string{
						v1.BasicAuthUsernameKey: env.SupervisorUpstreamActiveDirectory.BindUsername,
						v1.BasicAuthPasswordKey: env.SupervisorUpstreamActiveDirectory.BindPassword,
					},
				)
				secretName := secret.Name
				adIDP := testlib.CreateTestActiveDirectoryIdentityProvider(t, idpv1alpha1.ActiveDirectoryIdentityProviderSpec{
					Host: env.SupervisorUpstreamActiveDirectory.Host,
					TLS: &idpv1alpha1.TLSSpec{
						CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(env.SupervisorUpstreamActiveDirectory.CABundle)),
					},
					Bind: idpv1alpha1.ActiveDirectoryIdentityProviderBind{
						SecretName: secretName,
					},
				}, idpv1alpha1.ActiveDirectoryPhaseReady)

				// delete the secret
				client := testlib.NewKubernetesClientset(t)
				deleteCtx, deleteCancel := context.WithTimeout(context.Background(), time.Minute)
				defer deleteCancel()
				err := client.CoreV1().Secrets(env.SupervisorNamespace).Delete(deleteCtx, secretName, metav1.DeleteOptions{})
				require.NoError(t, err)

				// create the secret again
				recreateCtx, recreateCancel := context.WithTimeout(context.Background(), time.Minute)
				defer recreateCancel()
				recreatedSecret, err := client.CoreV1().Secrets(env.SupervisorNamespace).Create(recreateCtx, &v1.Secret{
					ObjectMeta: metav1.ObjectMeta{
						Name: secretName,
						Namespace: env.SupervisorNamespace,
					},
					Type: v1.SecretTypeBasicAuth,
					StringData: map[string]string{
						v1.BasicAuthUsernameKey: env.SupervisorUpstreamActiveDirectory.BindUsername,
						v1.BasicAuthPasswordKey: env.SupervisorUpstreamActiveDirectory.BindPassword,
					},
				}, metav1.CreateOptions{})
				require.NoError(t, err)

				expectedMsg := fmt.Sprintf(
					`successfully able to connect to "%s" and bind as user "%s" [validated with Secret "%s" at version "%s"]`,
					env.SupervisorUpstreamActiveDirectory.Host, env.SupervisorUpstreamActiveDirectory.BindUsername,
					recreatedSecret.Name, recreatedSecret.ResourceVersion,
				)
				supervisorClient := testlib.NewSupervisorClientset(t)
				testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
					ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
					defer cancel()
					adIDP, err = supervisorClient.IDPV1alpha1().ActiveDirectoryIdentityProviders(env.SupervisorNamespace).Get(ctx, adIDP.Name, metav1.GetOptions{})
					requireEventually.NoError(err)
					requireEventuallySuccessfulActiveDirectoryIdentityProviderConditions(t, requireEventually, adIDP, expectedMsg)
				}, time.Minute, 500*time.Millisecond)
			},
			requestAuthorization: func(t *testing.T, downstreamAuthorizeURL, _ string, httpClient *http.Client) {
				requestAuthorizationUsingCLIPasswordFlow(t,
					downstreamAuthorizeURL,
					env.SupervisorUpstreamActiveDirectory.TestUserPrincipalNameValue, // username to present to server during login
					env.SupervisorUpstreamActiveDirectory.TestUserPassword, // password to present to server during login
					httpClient,
					false,
				)
			},
			// the ID token Subject should be the Host URL plus the value pulled from the requested UserSearch.Attributes.UID attribute
			wantDownstreamIDTokenSubjectToMatch: "^" + regexp.QuoteMeta(
				"ldaps://"+env.SupervisorUpstreamActiveDirectory.Host+
					"?base="+url.QueryEscape(env.SupervisorUpstreamActiveDirectory.DefaultNamingContextSearchBase)+
					"&sub="+env.SupervisorUpstreamActiveDirectory.TestUserUniqueIDAttributeValue,
			) + "$",
			// the ID token Username should have been pulled from the requested UserSearch.Attributes.Username attribute
			wantDownstreamIDTokenUsernameToMatch: "^" + regexp.QuoteMeta(env.SupervisorUpstreamActiveDirectory.TestUserPrincipalNameValue) + "$",
			wantDownstreamIDTokenGroups: env.SupervisorUpstreamActiveDirectory.TestUserIndirectGroupsSAMAccountPlusDomainNames,
		},
		{
			name: "logging in to activedirectory with a deactivated user fails",
			maybeSkip: func(t *testing.T) {

@@ -570,6 +912,66 @@ func requireSuccessfulActiveDirectoryIdentityProviderConditions(t *testing.T, ad
	}, conditionsSummary)
}

func requireEventuallySuccessfulLDAPIdentityProviderConditions(t *testing.T, requireEventually *require.Assertions, ldapIDP *idpv1alpha1.LDAPIdentityProvider, expectedLDAPConnectionValidMessage string) {
	t.Helper()
	requireEventually.Len(ldapIDP.Status.Conditions, 3)

	conditionsSummary := [][]string{}
	for _, condition := range ldapIDP.Status.Conditions {
		conditionsSummary = append(conditionsSummary, []string{condition.Type, string(condition.Status), condition.Reason})
		t.Logf("Saw ActiveDirectoryIdentityProvider Status.Condition Type=%s Status=%s Reason=%s Message=%s",
			condition.Type, string(condition.Status), condition.Reason, condition.Message)
		switch condition.Type {
		case "BindSecretValid":
			requireEventually.Equal("loaded bind secret", condition.Message)
		case "TLSConfigurationValid":
			requireEventually.Equal("loaded TLS configuration", condition.Message)
		case "LDAPConnectionValid":
			requireEventually.Equal(expectedLDAPConnectionValidMessage, condition.Message)
		}
	}

	requireEventually.ElementsMatch([][]string{
		{"BindSecretValid", "True", "Success"},
		{"TLSConfigurationValid", "True", "Success"},
		{"LDAPConnectionValid", "True", "Success"},
	}, conditionsSummary)
}

func requireEventuallySuccessfulActiveDirectoryIdentityProviderConditions(t *testing.T, requireEventually *require.Assertions, adIDP *idpv1alpha1.ActiveDirectoryIdentityProvider, expectedActiveDirectoryConnectionValidMessage string) {
	t.Helper()
	requireEventually.Len(adIDP.Status.Conditions, 4)

	conditionsSummary := [][]string{}
	for _, condition := range adIDP.Status.Conditions {
		conditionsSummary = append(conditionsSummary, []string{condition.Type, string(condition.Status), condition.Reason})
		t.Logf("Saw ActiveDirectoryIdentityProvider Status.Condition Type=%s Status=%s Reason=%s Message=%s",
			condition.Type, string(condition.Status), condition.Reason, condition.Message)
		switch condition.Type {
		case "BindSecretValid":
			requireEventually.Equal("loaded bind secret", condition.Message)
		case "TLSConfigurationValid":
			requireEventually.Equal("loaded TLS configuration", condition.Message)
		case "LDAPConnectionValid":
			requireEventually.Equal(expectedActiveDirectoryConnectionValidMessage, condition.Message)
		}
	}

	expectedUserSearchReason := ""
	if adIDP.Spec.UserSearch.Base == "" || adIDP.Spec.GroupSearch.Base == "" {
		expectedUserSearchReason = "Success"
	} else {
		expectedUserSearchReason = "UsingConfigurationFromSpec"
	}

	requireEventually.ElementsMatch([][]string{
		{"BindSecretValid", "True", "Success"},
		{"TLSConfigurationValid", "True", "Success"},
		{"LDAPConnectionValid", "True", "Success"},
		{"SearchBaseFound", "True", expectedUserSearchReason},
	}, conditionsSummary)
}

func testSupervisorLogin(
	t *testing.T,
	createIDP func(t *testing.T),

@@ -464,7 +464,7 @@ func CreateTestActiveDirectoryIdentityProvider(t *testing.T, spec idpv1alpha1.Ac
	})
	t.Logf("created test ActiveDirectoryIdentityProvider %s", created.Name)

	// Wait for the LDAPIdentityProvider to enter the expected phase (or time out).
	// Wait for the ActiveDirectoryIdentityProvider to enter the expected phase (or time out).
	var result *idpv1alpha1.ActiveDirectoryIdentityProvider
	RequireEventuallyf(t,
		func(requireEventually *require.Assertions) {