Mirror of https://github.com/vmware-tanzu/pinniped.git, synced 2026-03-17 09:00:37 +00:00
Compare commits
102 Commits
| SHA1 |
|---|
| 1ddc85495f |
| 716659b74a |
| 696c2b9133 |
| 0770682bf9 |
| 88ff3164a2 |
| 56d316e8d3 |
| 9fc7f43245 |
| 47f5e822d0 |
| cc99d9aeb4 |
| 7ece196893 |
| a08a28d67b |
| 2634c9f04a |
| 29a1ca5168 |
| 5240f5e84a |
| a8bccc5432 |
| f167a075dd |
| 8136c787a7 |
| 3e13b5f39d |
| 1a2940c278 |
| 4bb0fdeddd |
| 4ce77c4837 |
| 1586171876 |
| 165bef7809 |
| b80cbb8cc5 |
| 71e38d232e |
| ab94b97f4a |
| d6a172214d |
| 638fa7ba27 |
| b5ffab6330 |
| 8556a638a2 |
| 44c7f8daf0 |
| 1efa4da80c |
| 62785674c3 |
| 9e4f601a3f |
| bb7e7fe81e |
| bed2d2dd62 |
| 90b2854032 |
| 96fda6ed13 |
| 67a568811a |
| 620a4d55b7 |
| a52872cd03 |
| 0dfb3e95c5 |
| e532a88647 |
| 54a8297cc4 |
| 2843c4f8cb |
| cc51c72c12 |
| 0ab9927115 |
| 204c8e8dbc |
| 638d9235a2 |
| 81a4c84f46 |
| 9f509d3f13 |
| 5f3eab2538 |
| c45d48d027 |
| 09560fd8dc |
| 264778113d |
| b5889f37ff |
| 45e4695444 |
| 6a21499ed3 |
| 211d4fd0b6 |
| 8ffd9fdc4e |
| d76ac56df2 |
| d86b24ca2f |
| 73716f1b91 |
| 521adffb17 |
| 70d607d87e |
| 9dfa1f5ee5 |
| f63ded99bc |
| e7b7b597ff |
| e5da119000 |
| 923938ab26 |
| 352d4dc5b1 |
| dab7b57da0 |
| 12d35583c5 |
| 599c537d24 |
| 38f3ea3f2f |
| e450a348c5 |
| 11d820be06 |
| 63816aa3ba |
| e5314164c5 |
| abf606ab72 |
| b59a4f3fec |
| 3b461572ea |
| 271c006b6c |
| 043cefcd9f |
| 2296faaeef |
| fec24d307e |
| 64b13043ed |
| 5501b5aa13 |
| 9450048acf |
| c53507809d |
| 9cd2b6e855 |
| 4e25bcd4b2 |
| 5add31d263 |
| 88c4335b4b |
| 623830bf1f |
| 30f476e1ac |
| 7b82b7a010 |
| 44bf925c3e |
| d2a6d7689f |
| 23dbd7cab6 |
| e4321cb369 |
| ad66f67dc9 |
@@ -5,9 +5,8 @@
./deploy
./Dockerfile
./generated/1.1*
./hack/lib/tilt/
./internal/mocks
./LICENSE
./site/
./test
**/*_test.go
**/*_test.go
.gitignore (vendored, 3 lines changed)
@@ -17,8 +17,5 @@
# GoLand
.idea

# Intermediate files used by Tilt
/hack/lib/tilt/build

# MacOS Desktop Services Store
.DS_Store
@@ -1,6 +1,6 @@
# This is a configuration for https://pre-commit.com/.
# On macOS, try `brew install pre-commit` and then run `pre-commit install`.
exclude: '^(site|generated|hack/lib/tilt/tilt_modules)/'
exclude: '^(site|generated)/'
repos:
- repo: git://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0
@@ -9,6 +9,8 @@ list, [follow these directions](#adding-your-organization-to-the-list-of-adopter

<a href="https://kubeapps.com/" border="0" target="_blank"><img alt="kubeapps" src="site/themes/pinniped/static/img/kubeapps.svg" height="50"></a>

<a href="https://www.ok.dk/" border="0" target="_blank"><img alt="ok-amba" src="site/themes/pinniped/static/img/ok-amba.svg" height="50"></a>

## Solutions built with Pinniped

Below is a list of solutions where Pinniped is being used as a component.
@@ -3,7 +3,7 @@
# Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

FROM golang:1.16.2 as build-env
FROM golang:1.16.4 as build-env

WORKDIR /work
COPY . .
@@ -43,7 +43,7 @@ To learn more, see [architecture](https://pinniped.dev/docs/background/architect

## Getting started with Pinniped

Care to kick the tires? It's easy to [install and try Pinniped](https://pinniped.dev/docs/demo/).
Care to kick the tires? It's easy to [install and try Pinniped](https://pinniped.dev/docs/).

## Community meetings
ROADMAP.md (10 lines changed)
@@ -33,18 +33,18 @@ The following table includes the current roadmap for Pinniped. If you have any q

Last Updated: March 2021
Last Updated: April 2021
Theme|Description|Timeline|
|--|--|--|
|Impersonation Proxy|Adds support for more types of clusters (managed services)|Mar 2021|
|LDAP Support|Extends upstream IDP protocols|Apr 2021|
|Device Code Flow|Add support for OAuth 2.0 Device Authorization Grant in the Pinniped CLI and Supervisor|Apr 2021|
|LDAP Support|Extends upstream IDP protocols|May 2021|
|Improved Documentation|Reorganizing and improving Pinniped docs; new how-to guides and tutorials|May 2021|
|CLI Improvements|Improving CLI UX for setting up Supervisor IDPs|May 2021|
|Multiple IDPs|Support for multiple upstream IDPs to be configured simultaneously|Jun 2021|
|Wider Concierge cluster support|Support for more cluster types in the Concierge|Jul 2021|
|Improving Security Posture|Offer the best security posture for Kubernetes cluster authentication|Exploring/Ongoing|
|Improve our CI/CD systems|Upgrade tests; make Kind more efficient and reliable for CI ; Windows tests; performance tests; scale tests; soak tests|Exploring/Ongoing|
|CLI Improvements|Improving CLI UX for setting up Supervisor IDPs|Exploring/Ongoing|
|Telemetry|Adding some useful phone home metrics as well as some vanity metrics|Exploring/Ongoing|
|Observability|Expose Pinniped metrics through Prometheus Integration|Exploring/Ongoing|
|Device Code Flow|Add support for OAuth 2.0 Device Authorization Grant in the Pinniped CLI and Supervisor|Exploring/Ongoing|
@@ -86,6 +86,9 @@ type getKubeconfigParams struct {
staticTokenEnvName string
oidc getKubeconfigOIDCParams
concierge getKubeconfigConciergeParams
generatedNameSuffix string
credentialCachePath string
credentialCachePathSet bool
}

func kubeconfigCommand(deps kubeconfigDeps) *cobra.Command {
@@ -130,7 +133,8 @@ func kubeconfigCommand(deps kubeconfigDeps) *cobra.Command {
f.BoolVar(&flags.skipValidate, "skip-validation", false, "Skip final validation of the kubeconfig (default: false)")
f.DurationVar(&flags.timeout, "timeout", 10*time.Minute, "Timeout for autodiscovery and validation")
f.StringVarP(&flags.outputPath, "output", "o", "", "Output file path (default: stdout)")
f.StringVar(&flags.generatedNameSuffix, "generated-name-suffix", "-pinniped", "Suffix to append to generated cluster, context, user kubeconfig entries")
f.StringVar(&flags.credentialCachePath, "credential-cache", "", "Path to cluster-specific credentials cache")
mustMarkHidden(cmd, "oidc-debug-session-cache")

mustMarkDeprecated(cmd, "concierge-namespace", "not needed anymore")
@@ -145,6 +149,7 @@ func kubeconfigCommand(deps kubeconfigDeps) *cobra.Command {
defer func() { _ = out.Close() }()
cmd.SetOut(out)
}
flags.credentialCachePathSet = cmd.Flags().Changed("credential-cache")
return runGetKubeconfig(cmd.Context(), cmd.OutOrStdout(), deps, flags)
}
return cmd
@@ -178,15 +183,23 @@ func runGetKubeconfig(ctx context.Context, out io.Writer, deps kubeconfigDeps, f
if err != nil {
return fmt.Errorf("could not load --kubeconfig: %w", err)
}
cluster, err := copyCurrentClusterFromExistingKubeConfig(currentKubeConfig, flags.kubeconfigContextOverride)
currentKubeconfigNames, err := getCurrentContext(currentKubeConfig, flags)
if err != nil {
return fmt.Errorf("could not load --kubeconfig/--kubeconfig-context: %w", err)
}
cluster := currentKubeConfig.Clusters[currentKubeconfigNames.ClusterName]
clientset, err := deps.getClientset(clientConfig, flags.concierge.apiGroupSuffix)
if err != nil {
return fmt.Errorf("could not configure Kubernetes client: %w", err)
}

// Generate the new context/cluster/user names by appending the --generated-name-suffix to the original values.
newKubeconfigNames := &kubeconfigNames{
ContextName: currentKubeconfigNames.ContextName + flags.generatedNameSuffix,
UserName: currentKubeconfigNames.UserName + flags.generatedNameSuffix,
ClusterName: currentKubeconfigNames.ClusterName + flags.generatedNameSuffix,
}

if !flags.concierge.disabled {
credentialIssuer, err := waitForCredentialIssuer(ctx, clientset, flags, deps)
if err != nil {
@@ -223,6 +236,11 @@ func runGetKubeconfig(ctx context.Context, out io.Writer, deps kubeconfigDeps, f
cluster.CertificateAuthorityData = flags.concierge.caBundle
}

// If --credential-cache is set, pass it through.
if flags.credentialCachePathSet {
execConfig.Args = append(execConfig.Args, "--credential-cache="+flags.credentialCachePath)
}

// If one of the --static-* flags was passed, output a config that runs `pinniped login static`.
if flags.staticToken != "" || flags.staticTokenEnvName != "" {
if flags.staticToken != "" && flags.staticTokenEnvName != "" {
@@ -236,7 +254,7 @@ func runGetKubeconfig(ctx context.Context, out io.Writer, deps kubeconfigDeps, f
execConfig.Args = append(execConfig.Args, "--token-env="+flags.staticTokenEnvName)
}

kubeconfig := newExecKubeconfig(cluster, &execConfig)
kubeconfig := newExecKubeconfig(cluster, &execConfig, newKubeconfigNames)
if err := validateKubeconfig(ctx, flags, kubeconfig, deps.log); err != nil {
return err
}
@@ -271,13 +289,33 @@ func runGetKubeconfig(ctx context.Context, out io.Writer, deps kubeconfigDeps, f
if flags.oidc.requestAudience != "" {
execConfig.Args = append(execConfig.Args, "--request-audience="+flags.oidc.requestAudience)
}
kubeconfig := newExecKubeconfig(cluster, &execConfig)
kubeconfig := newExecKubeconfig(cluster, &execConfig, newKubeconfigNames)
if err := validateKubeconfig(ctx, flags, kubeconfig, deps.log); err != nil {
return err
}
return writeConfigAsYAML(out, kubeconfig)
}

type kubeconfigNames struct{ ContextName, UserName, ClusterName string }

func getCurrentContext(currentKubeConfig clientcmdapi.Config, flags getKubeconfigParams) (*kubeconfigNames, error) {
contextName := currentKubeConfig.CurrentContext
if flags.kubeconfigContextOverride != "" {
contextName = flags.kubeconfigContextOverride
}
ctx := currentKubeConfig.Contexts[contextName]
if ctx == nil {
return nil, fmt.Errorf("no such context %q", contextName)
}
if _, exists := currentKubeConfig.Clusters[ctx.Cluster]; !exists {
return nil, fmt.Errorf("no such cluster %q", ctx.Cluster)
}
if _, exists := currentKubeConfig.AuthInfos[ctx.AuthInfo]; !exists {
return nil, fmt.Errorf("no such user %q", ctx.AuthInfo)
}
return &kubeconfigNames{ContextName: contextName, UserName: ctx.AuthInfo, ClusterName: ctx.Cluster}, nil
}

func waitForCredentialIssuer(ctx context.Context, clientset conciergeclientset.Interface, flags getKubeconfigParams, deps kubeconfigDeps) (*configv1alpha1.CredentialIssuer, error) {
credentialIssuer, err := lookupCredentialIssuer(clientset, flags.concierge.credentialIssuer, deps.log)
if err != nil {
@@ -461,15 +499,14 @@ func getConciergeFrontend(credentialIssuer *configv1alpha1.CredentialIssuer, mod
return nil, fmt.Errorf("could not find successful Concierge strategy matching --concierge-mode=%s", mode.String())
}

func newExecKubeconfig(cluster *clientcmdapi.Cluster, execConfig *clientcmdapi.ExecConfig) clientcmdapi.Config {
const name = "pinniped"
func newExecKubeconfig(cluster *clientcmdapi.Cluster, execConfig *clientcmdapi.ExecConfig, newNames *kubeconfigNames) clientcmdapi.Config {
return clientcmdapi.Config{
Kind: "Config",
APIVersion: clientcmdapi.SchemeGroupVersion.Version,
Clusters: map[string]*clientcmdapi.Cluster{name: cluster},
AuthInfos: map[string]*clientcmdapi.AuthInfo{name: {Exec: execConfig}},
Contexts: map[string]*clientcmdapi.Context{name: {Cluster: name, AuthInfo: name}},
CurrentContext: name,
Clusters: map[string]*clientcmdapi.Cluster{newNames.ClusterName: cluster},
AuthInfos: map[string]*clientcmdapi.AuthInfo{newNames.UserName: {Exec: execConfig}},
Contexts: map[string]*clientcmdapi.Context{newNames.ContextName: {Cluster: newNames.ClusterName, AuthInfo: newNames.UserName}},
CurrentContext: newNames.ContextName,
}
}

@@ -560,18 +597,6 @@ func writeConfigAsYAML(out io.Writer, config clientcmdapi.Config) error {
return nil
}

func copyCurrentClusterFromExistingKubeConfig(currentKubeConfig clientcmdapi.Config, currentContextNameOverride string) (*clientcmdapi.Cluster, error) {
contextName := currentKubeConfig.CurrentContext
if currentContextNameOverride != "" {
contextName = currentContextNameOverride
}
ctx := currentKubeConfig.Contexts[contextName]
if ctx == nil {
return nil, fmt.Errorf("no such context %q", contextName)
}
return currentKubeConfig.Clusters[ctx.Cluster], nil
}

func validateKubeconfig(ctx context.Context, flags getKubeconfigParams, kubeconfig clientcmdapi.Config, log logr.Logger) error {
if flags.skipValidate {
return nil
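The kubeconfigNames plumbing above replaces the old hard-coded `pinniped` entry names: the generated kubeconfig now reuses the current context's cluster, user, and context names and appends `--generated-name-suffix` (default `-pinniped`). A minimal sketch of that naming rule, with input values borrowed from the test fixture further down (the helper function here is illustrative, not part of the diff):

```go
package main

import "fmt"

// kubeconfigNames mirrors the small struct introduced in the diff above.
type kubeconfigNames struct{ ContextName, UserName, ClusterName string }

// withSuffix sketches how the new entry names are derived from the current
// context's names plus the --generated-name-suffix flag value.
func withSuffix(current kubeconfigNames, suffix string) kubeconfigNames {
	return kubeconfigNames{
		ContextName: current.ContextName + suffix,
		UserName:    current.UserName + suffix,
		ClusterName: current.ClusterName + suffix,
	}
}

func main() {
	current := kubeconfigNames{ContextName: "kind-context", UserName: "kind-user", ClusterName: "kind-cluster"}
	// Prints the suffixed names that also appear in the expected test output below:
	// {ContextName:kind-context-pinniped UserName:kind-user-pinniped ClusterName:kind-cluster-pinniped}
	fmt.Printf("%+v\n", withSuffix(current, "-pinniped"))
}
```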
@@ -73,6 +73,8 @@ func TestGetKubeconfig(t *testing.T) {
--concierge-endpoint string API base for the Concierge endpoint
--concierge-mode mode Concierge mode of operation (default TokenCredentialRequestAPI)
--concierge-skip-wait Skip waiting for any pending Concierge strategies to become ready (default: false)
--credential-cache string Path to cluster-specific credentials cache
--generated-name-suffix string Suffix to append to generated cluster, context, user kubeconfig entries (default "-pinniped")
-h, --help help for kubeconfig
--kubeconfig string Path to kubeconfig file
--kubeconfig-context string Kubeconfig context name (default: current active context)
@@ -133,7 +135,7 @@ func TestGetKubeconfig(t *testing.T) {
`),
},
{
name: "invalid kubeconfig context",
name: "invalid kubeconfig context, missing",
args: []string{
"--kubeconfig", "./testdata/kubeconfig.yaml",
"--kubeconfig-context", "invalid",
@@ -143,6 +145,28 @@ func TestGetKubeconfig(t *testing.T) {
Error: could not load --kubeconfig/--kubeconfig-context: no such context "invalid"
`),
},
{
name: "invalid kubeconfig context, missing cluster",
args: []string{
"--kubeconfig", "./testdata/kubeconfig.yaml",
"--kubeconfig-context", "invalid-context-no-such-cluster",
},
wantError: true,
wantStderr: here.Doc(`
Error: could not load --kubeconfig/--kubeconfig-context: no such cluster "invalid-cluster"
`),
},
{
name: "invalid kubeconfig context, missing user",
args: []string{
"--kubeconfig", "./testdata/kubeconfig.yaml",
"--kubeconfig-context", "invalid-context-no-such-user",
},
wantError: true,
wantStderr: here.Doc(`
Error: could not load --kubeconfig/--kubeconfig-context: no such user "invalid-user"
`),
},
{
name: "clientset creation failure",
args: []string{
@@ -584,17 +608,17 @@ func TestGetKubeconfig(t *testing.T) {
- cluster:
certificate-authority-data: ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==
server: https://fake-server-url-value
name: pinniped
name: kind-cluster-pinniped
contexts:
- context:
cluster: pinniped
user: pinniped
name: pinniped
current-context: pinniped
cluster: kind-cluster-pinniped
user: kind-user-pinniped
name: kind-context-pinniped
current-context: kind-context-pinniped
kind: Config
preferences: {}
users:
- name: pinniped
- name: kind-user-pinniped
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
@@ -619,6 +643,7 @@ func TestGetKubeconfig(t *testing.T) {
"--kubeconfig", "./testdata/kubeconfig.yaml",
"--static-token-env", "TEST_TOKEN",
"--skip-validation",
"--credential-cache", "",
},
conciergeObjects: []runtime.Object{
&configv1alpha1.CredentialIssuer{
@@ -653,17 +678,17 @@ func TestGetKubeconfig(t *testing.T) {
- cluster:
certificate-authority-data: ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==
server: https://fake-server-url-value
name: pinniped
name: kind-cluster-pinniped
contexts:
- context:
cluster: pinniped
user: pinniped
name: pinniped
current-context: pinniped
cluster: kind-cluster-pinniped
user: kind-user-pinniped
name: kind-context-pinniped
current-context: kind-context-pinniped
kind: Config
preferences: {}
users:
- name: pinniped
- name: kind-user-pinniped
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
@@ -676,6 +701,7 @@ func TestGetKubeconfig(t *testing.T) {
- --concierge-authenticator-type=webhook
- --concierge-endpoint=https://fake-server-url-value
- --concierge-ca-bundle-data=ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==
- --credential-cache=
- --token-env=TEST_TOKEN
command: '.../path/to/pinniped'
env: []
@@ -733,17 +759,17 @@ func TestGetKubeconfig(t *testing.T) {
- cluster:
certificate-authority-data: ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==
server: https://fake-server-url-value
name: pinniped
name: kind-cluster-pinniped
contexts:
- context:
cluster: pinniped
user: pinniped
name: pinniped
current-context: pinniped
cluster: kind-cluster-pinniped
user: kind-user-pinniped
name: kind-context-pinniped
current-context: kind-context-pinniped
kind: Config
preferences: {}
users:
- name: pinniped
- name: kind-user-pinniped
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
@@ -785,6 +811,8 @@ func TestGetKubeconfig(t *testing.T) {
"--oidc-debug-session-cache",
"--oidc-request-audience", "test-audience",
"--skip-validation",
"--generated-name-suffix", "-sso",
"--credential-cache", "/path/to/cache/dir/credentials.yaml",
},
conciergeObjects: []runtime.Object{
&configv1alpha1.CredentialIssuer{
@@ -815,17 +843,17 @@ func TestGetKubeconfig(t *testing.T) {
- cluster:
certificate-authority-data: %s
server: https://explicit-concierge-endpoint.example.com
name: pinniped
name: kind-cluster-sso
contexts:
- context:
cluster: pinniped
user: pinniped
name: pinniped
current-context: pinniped
cluster: kind-cluster-sso
user: kind-user-sso
name: kind-context-sso
current-context: kind-context-sso
kind: Config
preferences: {}
users:
- name: pinniped
- name: kind-user-sso
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
@@ -838,6 +866,7 @@ func TestGetKubeconfig(t *testing.T) {
- --concierge-authenticator-type=webhook
- --concierge-endpoint=https://explicit-concierge-endpoint.example.com
- --concierge-ca-bundle-data=%s
- --credential-cache=/path/to/cache/dir/credentials.yaml
- --issuer=https://example.com/issuer
- --client-id=pinniped-cli
- --scopes=offline_access,openid,pinniped:request-audience
@@ -929,17 +958,17 @@ func TestGetKubeconfig(t *testing.T) {
- cluster:
certificate-authority-data: %s
server: https://impersonation-proxy-endpoint.test
name: pinniped
name: kind-cluster-pinniped
contexts:
- context:
cluster: pinniped
user: pinniped
name: pinniped
current-context: pinniped
cluster: kind-cluster-pinniped
user: kind-user-pinniped
name: kind-context-pinniped
current-context: kind-context-pinniped
kind: Config
preferences: {}
users:
- name: pinniped
- name: kind-user-pinniped
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
@@ -1035,17 +1064,17 @@ func TestGetKubeconfig(t *testing.T) {
- cluster:
certificate-authority-data: dGVzdC1jb25jaWVyZ2UtY2E=
server: https://impersonation-proxy-endpoint.test
name: pinniped
name: kind-cluster-pinniped
contexts:
- context:
cluster: pinniped
user: pinniped
name: pinniped
current-context: pinniped
cluster: kind-cluster-pinniped
user: kind-user-pinniped
name: kind-context-pinniped
current-context: kind-context-pinniped
kind: Config
preferences: {}
users:
- name: pinniped
- name: kind-user-pinniped
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
@@ -5,6 +5,8 @@ package cmd

import (
"github.com/spf13/cobra"
clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
"k8s.io/client-go/tools/auth/exec"
)

//nolint: gochecknoglobals
@@ -20,3 +22,15 @@ var loginCmd = &cobra.Command{
func init() {
rootCmd.AddCommand(loginCmd)
}

func loadClusterInfo() *clientauthv1beta1.Cluster {
obj, _, err := exec.LoadExecCredentialFromEnv()
if err != nil {
return nil
}
cred, ok := obj.(*clientauthv1beta1.ExecCredential)
if !ok {
return nil
}
return cred.Spec.Cluster
}
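The new loadClusterInfo helper leans on the standard exec-plugin handoff: when a kubeconfig's exec entry opts into cluster info, kubectl serializes an ExecCredential (including spec.cluster) into the KUBERNETES_EXEC_INFO environment variable, which k8s.io/client-go/tools/auth/exec reads back. A rough, self-contained illustration of that handoff; the JSON values are made up and this is a sketch rather than code from the diff:

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/client-go/tools/auth/exec"
)

func main() {
	// Simulate the env var kubectl would set for an exec credential plugin.
	// The server URL here is a fake example value.
	_ = os.Setenv("KUBERNETES_EXEC_INFO", `{
	  "apiVersion": "client.authentication.k8s.io/v1beta1",
	  "kind": "ExecCredential",
	  "spec": {"cluster": {"server": "https://example-cluster.test"}}
	}`)

	obj, _, err := exec.LoadExecCredentialFromEnv()
	if err != nil {
		fmt.Println("no exec credential in env:", err)
		return
	}
	// loadClusterInfo (above) type-asserts this object and returns spec.cluster.
	fmt.Printf("decoded %T\n", obj)
}
```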
@@ -20,9 +20,12 @@ import (
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
"k8s.io/client-go/transport"
"k8s.io/klog/v2/klogr"

"go.pinniped.dev/internal/execcredcache"
"go.pinniped.dev/internal/groupsuffix"
"go.pinniped.dev/internal/plog"
"go.pinniped.dev/pkg/conciergeclient"
"go.pinniped.dev/pkg/oidcclient"
"go.pinniped.dev/pkg/oidcclient/filesession"
@@ -35,13 +38,15 @@ func init() {
}

type oidcLoginCommandDeps struct {
lookupEnv func(string) (string, bool)
login func(string, string, ...oidcclient.Option) (*oidctypes.Token, error)
exchangeToken func(context.Context, *conciergeclient.Client, string) (*clientauthv1beta1.ExecCredential, error)
}

func oidcLoginCommandRealDeps() oidcLoginCommandDeps {
return oidcLoginCommandDeps{
login: oidcclient.Login,
lookupEnv: os.LookupEnv,
login: oidcclient.Login,
exchangeToken: func(ctx context.Context, client *conciergeclient.Client, token string) (*clientauthv1beta1.ExecCredential, error) {
return client.ExchangeToken(ctx, token)
},
@@ -65,6 +70,7 @@ type oidcLoginFlags struct {
conciergeEndpoint string
conciergeCABundle string
conciergeAPIGroupSuffix string
credentialCachePath string
}

func oidcLoginCommand(deps oidcLoginCommandDeps) *cobra.Command {
@@ -95,6 +101,7 @@ func oidcLoginCommand(deps oidcLoginCommandDeps) *cobra.Command {
cmd.Flags().StringVar(&flags.conciergeEndpoint, "concierge-endpoint", "", "API base for the Concierge endpoint")
cmd.Flags().StringVar(&flags.conciergeCABundle, "concierge-ca-bundle-data", "", "CA bundle to use when connecting to the Concierge")
cmd.Flags().StringVar(&flags.conciergeAPIGroupSuffix, "concierge-api-group-suffix", groupsuffix.PinnipedDefaultSuffix, "Concierge API group suffix")
cmd.Flags().StringVar(&flags.credentialCachePath, "credential-cache", filepath.Join(mustGetConfigDir(), "credentials.yaml"), "Path to cluster-specific credentials cache (\"\" disables the cache)")

mustMarkHidden(cmd, "debug-session-cache")
mustMarkRequired(cmd, "issuer")
@@ -107,6 +114,11 @@ func oidcLoginCommand(deps oidcLoginCommandDeps) *cobra.Command {
}

func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLoginFlags) error {
pLogger, err := SetLogLevel(deps.lookupEnv)
if err != nil {
plog.WarningErr("Received error while setting log level", err)
}

// Initialize the session cache.
var sessionOptions []filesession.Option

@@ -122,6 +134,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin
// Initialize the login handler.
opts := []oidcclient.Option{
oidcclient.WithContext(cmd.Context()),
oidcclient.WithLogger(klogr.New()),
oidcclient.WithScopes(flags.scopes),
oidcclient.WithSessionCache(sessionCache),
}
@@ -163,7 +176,24 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin
}
opts = append(opts, oidcclient.WithClient(client))
}
// Look up cached credentials based on a hash of all the CLI arguments and the cluster info.
cacheKey := struct {
Args []string `json:"args"`
ClusterInfo *clientauthv1beta1.Cluster `json:"cluster"`
}{
Args: os.Args[1:],
ClusterInfo: loadClusterInfo(),
}
var credCache *execcredcache.Cache
if flags.credentialCachePath != "" {
credCache = execcredcache.New(flags.credentialCachePath)
if cred := credCache.Get(cacheKey); cred != nil {
pLogger.Debug("using cached cluster credential.")
return json.NewEncoder(cmd.OutOrStdout()).Encode(cred)
}
}

pLogger.Debug("Performing OIDC login", "issuer", flags.issuer, "client id", flags.clientID)
// Do the basic login to get an OIDC token.
token, err := deps.login(flags.issuer, flags.clientID, opts...)
if err != nil {
@@ -173,6 +203,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin

// If the concierge was configured, exchange the credential for a separate short-lived, cluster-specific credential.
if concierge != nil {
pLogger.Debug("Exchanging token for cluster credential", "endpoint", flags.conciergeEndpoint, "authenticator type", flags.conciergeAuthenticatorType, "authenticator name", flags.conciergeAuthenticatorName)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

@@ -180,6 +211,15 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin
if err != nil {
return fmt.Errorf("could not complete Concierge credential exchange: %w", err)
}
pLogger.Debug("Successfully exchanged token for cluster credential.")
} else {
pLogger.Debug("No concierge configured, skipping token credential exchange")
}

// If there was a credential cache, save the resulting credential for future use.
if credCache != nil {
pLogger.Debug("caching cluster credential for future use.")
credCache.Put(cacheKey, cred)
}
return json.NewEncoder(cmd.OutOrStdout()).Encode(cred)
}
@@ -200,7 +240,7 @@ func makeClient(caBundlePaths []string, caBundleData []string) (*http.Client, er
}
pool.AppendCertsFromPEM(pem)
}
return &http.Client{
client := &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{
@@ -208,7 +248,10 @@ func makeClient(caBundlePaths []string, caBundleData []string) (*http.Client, er
MinVersion: tls.VersionTLS12,
},
},
}, nil
}

client.Transport = transport.DebugWrappers(client.Transport)
return client, nil
}

func tokenCredential(token *oidctypes.Token) *clientauthv1beta1.ExecCredential {
@@ -227,6 +270,18 @@ func tokenCredential(token *oidctypes.Token) *clientauthv1beta1.ExecCredential {
return &cred
}

func SetLogLevel(lookupEnv func(string) (string, bool)) (*plog.PLogger, error) {
debug, _ := lookupEnv("PINNIPED_DEBUG")
if debug == "true" {
err := plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug)
if err != nil {
return nil, err
}
}
logger := plog.New("Pinniped login: ")
return &logger, nil
}

// mustGetConfigDir returns a directory that follows the XDG base directory convention:
// $XDG_CONFIG_HOME defines the base directory relative to which user specific configuration files should
// be stored. If $XDG_CONFIG_HOME is either not set or empty, a default equal to $HOME/.config should be used.
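Both login paths now key their cached credentials on the full CLI argument list plus the cluster info that kubectl passed in, so different clusters or flag combinations never share a cache entry. A rough sketch of that keying idea, assuming a simple JSON-plus-SHA-256 scheme; the real go.pinniped.dev/internal/execcredcache package may derive its keys differently:

```go
package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// cacheKeyFor illustrates deriving a per-cluster cache key from the CLI args
// and the target cluster; the hashing details are an assumption for this sketch.
func cacheKeyFor(args []string, clusterServer string) (string, error) {
	key := struct {
		Args    []string `json:"args"`
		Cluster string   `json:"cluster"`
	}{Args: args, Cluster: clusterServer}
	raw, err := json.Marshal(key)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", sha256.Sum256(raw)), nil
}

func main() {
	a, _ := cacheKeyFor([]string{"login", "oidc", "--issuer=https://example.com"}, "https://cluster-a.test")
	b, _ := cacheKeyFor([]string{"login", "oidc", "--issuer=https://example.com"}, "https://cluster-b.test")
	fmt.Println(a != b) // true: each cluster gets its own cached credential
}
```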
@@ -16,10 +16,12 @@ import (
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
"k8s.io/klog/v2"

"go.pinniped.dev/internal/certauthority"
"go.pinniped.dev/internal/here"
"go.pinniped.dev/internal/testutil"
"go.pinniped.dev/internal/testutil/testlogger"
"go.pinniped.dev/pkg/conciergeclient"
"go.pinniped.dev/pkg/oidcclient"
"go.pinniped.dev/pkg/oidcclient/oidctypes"
@@ -41,10 +43,12 @@ func TestLoginOIDCCommand(t *testing.T) {
args []string
loginErr error
conciergeErr error
env map[string]string
wantError bool
wantStdout string
wantStderr string
wantOptionsCount int
wantLogs []string
}{
{
name: "help flag passed",
@@ -64,6 +68,7 @@ func TestLoginOIDCCommand(t *testing.T) {
--concierge-authenticator-type string Concierge authenticator type (e.g., 'webhook', 'jwt')
--concierge-ca-bundle-data string CA bundle to use when connecting to the Concierge
--concierge-endpoint string API base for the Concierge endpoint
--credential-cache string Path to cluster-specific credentials cache ("" disables the cache) (default "` + cfgDir + `/credentials.yaml")
--enable-concierge Use the Concierge to login
-h, --help help for oidc
--issuer string OpenID Connect issuer URL
@@ -140,7 +145,7 @@ func TestLoginOIDCCommand(t *testing.T) {
"--issuer", "test-issuer",
},
loginErr: fmt.Errorf("some login error"),
wantOptionsCount: 3,
wantOptionsCount: 4,
wantError: true,
wantStderr: here.Doc(`
Error: could not complete Pinniped login: some login error
@@ -157,7 +162,7 @@ func TestLoginOIDCCommand(t *testing.T) {
"--concierge-endpoint", "https://127.0.0.1:1234/",
},
conciergeErr: fmt.Errorf("some concierge error"),
wantOptionsCount: 3,
wantOptionsCount: 4,
wantError: true,
wantStderr: here.Doc(`
Error: could not complete Concierge credential exchange: some concierge error
@@ -169,8 +174,14 @@ func TestLoginOIDCCommand(t *testing.T) {
"--client-id", "test-client-id",
"--issuer", "test-issuer",
},
wantOptionsCount: 3,
env: map[string]string{"PINNIPED_DEBUG": "true"},
wantOptionsCount: 4,
wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"expirationTimestamp":"3020-10-12T13:14:15Z","token":"test-id-token"}}` + "\n",
wantLogs: []string{
"\"level\"=0 \"msg\"=\"Pinniped login: Performing OIDC login\" \"client id\"=\"test-client-id\" \"issuer\"=\"test-issuer\"",
"\"level\"=0 \"msg\"=\"Pinniped login: No concierge configured, skipping token credential exchange\"",
"\"level\"=0 \"msg\"=\"Pinniped login: caching cluster credential for future use.\"",
},
},
{
name: "success with all options",
@@ -189,18 +200,32 @@ func TestLoginOIDCCommand(t *testing.T) {
"--concierge-endpoint", "https://127.0.0.1:1234/",
"--concierge-ca-bundle-data", base64.StdEncoding.EncodeToString(testCA.Bundle()),
"--concierge-api-group-suffix", "some.suffix.com",
"--credential-cache", testutil.TempDir(t) + "/credentials.yaml",
},
wantOptionsCount: 7,
env: map[string]string{"PINNIPED_DEBUG": "true"},
wantOptionsCount: 8,
wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"token":"exchanged-token"}}` + "\n",
wantLogs: []string{
"\"level\"=0 \"msg\"=\"Pinniped login: Performing OIDC login\" \"client id\"=\"test-client-id\" \"issuer\"=\"test-issuer\"",
"\"level\"=0 \"msg\"=\"Pinniped login: Exchanging token for cluster credential\" \"authenticator name\"=\"test-authenticator\" \"authenticator type\"=\"webhook\" \"endpoint\"=\"https://127.0.0.1:1234/\"",
"\"level\"=0 \"msg\"=\"Pinniped login: Successfully exchanged token for cluster credential.\"",
"\"level\"=0 \"msg\"=\"Pinniped login: caching cluster credential for future use.\"",
},
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
testLogger := testlogger.New(t)
klog.SetLogger(testLogger)
var (
gotOptions []oidcclient.Option
)
cmd := oidcLoginCommand(oidcLoginCommandDeps{
lookupEnv: func(s string) (string, bool) {
v, ok := tt.env[s]
return v, ok
},
login: func(issuer string, clientID string, opts ...oidcclient.Option) (*oidctypes.Token, error) {
require.Equal(t, "test-issuer", issuer)
require.Equal(t, "test-client-id", clientID)
@@ -246,6 +271,8 @@ func TestLoginOIDCCommand(t *testing.T) {
require.Equal(t, tt.wantStdout, stdout.String(), "unexpected stdout")
require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr")
require.Len(t, gotOptions, tt.wantOptionsCount)

require.Equal(t, tt.wantLogs, testLogger.Lines())
})
}
}
@@ -9,12 +9,15 @@ import (
"fmt"
"io"
"os"
"path/filepath"
"time"

"github.com/spf13/cobra"
clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"

"go.pinniped.dev/internal/execcredcache"
"go.pinniped.dev/internal/groupsuffix"
"go.pinniped.dev/internal/plog"
"go.pinniped.dev/pkg/conciergeclient"
"go.pinniped.dev/pkg/oidcclient/oidctypes"
)
@@ -47,6 +50,7 @@ type staticLoginParams struct {
conciergeEndpoint string
conciergeCABundle string
conciergeAPIGroupSuffix string
credentialCachePath string
}

func staticLoginCommand(deps staticLoginDeps) *cobra.Command {
@@ -69,6 +73,7 @@ func staticLoginCommand(deps staticLoginDeps) *cobra.Command {
cmd.Flags().StringVar(&flags.conciergeEndpoint, "concierge-endpoint", "", "API base for the Concierge endpoint")
cmd.Flags().StringVar(&flags.conciergeCABundle, "concierge-ca-bundle-data", "", "CA bundle to use when connecting to the Concierge")
cmd.Flags().StringVar(&flags.conciergeAPIGroupSuffix, "concierge-api-group-suffix", groupsuffix.PinnipedDefaultSuffix, "Concierge API group suffix")
cmd.Flags().StringVar(&flags.credentialCachePath, "credential-cache", filepath.Join(mustGetConfigDir(), "credentials.yaml"), "Path to cluster-specific credentials cache (\"\" disables the cache)")

cmd.RunE = func(cmd *cobra.Command, args []string) error { return runStaticLogin(cmd.OutOrStdout(), deps, flags) }

@@ -79,6 +84,11 @@ func staticLoginCommand(deps staticLoginDeps) *cobra.Command {
}

func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams) error {
pLogger, err := SetLogLevel(deps.lookupEnv)
if err != nil {
plog.WarningErr("Received error while setting log level", err)
}

if flags.staticToken == "" && flags.staticTokenEnvName == "" {
return fmt.Errorf("one of --token or --token-env must be set")
}
@@ -113,8 +123,28 @@ func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams
}
cred := tokenCredential(&oidctypes.Token{IDToken: &oidctypes.IDToken{Token: token}})

// Look up cached credentials based on a hash of all the CLI arguments, the current token value, and the cluster info.
cacheKey := struct {
Args []string `json:"args"`
Token string `json:"token"`
ClusterInfo *clientauthv1beta1.Cluster `json:"cluster"`
}{
Args: os.Args[1:],
Token: token,
ClusterInfo: loadClusterInfo(),
}
var credCache *execcredcache.Cache
if flags.credentialCachePath != "" {
credCache = execcredcache.New(flags.credentialCachePath)
if cred := credCache.Get(cacheKey); cred != nil {
pLogger.Debug("using cached cluster credential.")
return json.NewEncoder(out).Encode(cred)
}
}

// If the concierge was configured, exchange the credential for a separate short-lived, cluster-specific credential.
if concierge != nil {
pLogger.Debug("exchanging static token for cluster credential", "endpoint", flags.conciergeEndpoint, "authenticator type", flags.conciergeAuthenticatorType, "authenticator name", flags.conciergeAuthenticatorName)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

@@ -123,6 +153,14 @@ func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams
if err != nil {
return fmt.Errorf("could not complete Concierge credential exchange: %w", err)
}
pLogger.Debug("exchanged static token for cluster credential")
}

// If there was a credential cache, save the resulting credential for future use. We only save to the cache if
// the credential came from the concierge, since that's the only static token case where the cache is useful.
if credCache != nil && concierge != nil {
credCache.Put(cacheKey, cred)
}

return json.NewEncoder(out).Encode(cred)
}
@@ -12,6 +12,10 @@ import (
"testing"
"time"

"k8s.io/klog/v2"

"go.pinniped.dev/internal/testutil/testlogger"

"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
@@ -23,6 +27,8 @@ import (
)

func TestLoginStaticCommand(t *testing.T) {
cfgDir := mustGetConfigDir()

testCA, err := certauthority.New("Test CA", 1*time.Hour)
require.NoError(t, err)
tmpdir := testutil.TempDir(t)
@@ -39,6 +45,7 @@ func TestLoginStaticCommand(t *testing.T) {
wantStdout string
wantStderr string
wantOptionsCount int
wantLogs []string
}{
{
name: "help flag passed",
@@ -55,6 +62,7 @@ func TestLoginStaticCommand(t *testing.T) {
--concierge-authenticator-type string Concierge authenticator type (e.g., 'webhook', 'jwt')
--concierge-ca-bundle-data string CA bundle to use when connecting to the Concierge
--concierge-endpoint string API base for the Concierge endpoint
--credential-cache string Path to cluster-specific credentials cache ("" disables the cache) (default "` + cfgDir + `/credentials.yaml")
--enable-concierge Use the Concierge to login
-h, --help help for static
--token string Static token to present during login
@@ -123,10 +131,12 @@ func TestLoginStaticCommand(t *testing.T) {
"--concierge-authenticator-name", "test-authenticator",
},
conciergeErr: fmt.Errorf("some concierge error"),
env: map[string]string{"PINNIPED_DEBUG": "true"},
wantError: true,
wantStderr: here.Doc(`
Error: could not complete Concierge credential exchange: some concierge error
`),
wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped login: exchanging static token for cluster credential\" \"authenticator name\"=\"test-authenticator\" \"authenticator type\"=\"webhook\" \"endpoint\"=\"https://127.0.0.1/\""},
},
{
name: "invalid API group suffix",
@@ -148,12 +158,15 @@ func TestLoginStaticCommand(t *testing.T) {
args: []string{
"--token", "test-token",
},
env: map[string]string{"PINNIPED_DEBUG": "true"},
wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"token":"test-token"}}` + "\n",
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
testLogger := testlogger.New(t)
klog.SetLogger(testLogger)
cmd := staticLoginCommand(staticLoginDeps{
lookupEnv: func(s string) (string, bool) {
v, ok := tt.env[s]
@@ -189,6 +202,8 @@ func TestLoginStaticCommand(t *testing.T) {
}
require.Equal(t, tt.wantStdout, stdout.String(), "unexpected stdout")
require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr")

require.Equal(t, tt.wantLogs, testLogger.Lines())
})
}
}
cmd/pinniped/cmd/testdata/kubeconfig.yaml (vendored, 20 lines changed)
@@ -3,25 +3,33 @@ clusters:
- cluster:
certificate-authority-data: ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ== # fake-certificate-authority-data-value
server: https://fake-server-url-value
name: kind-kind
name: kind-cluster
- cluster:
certificate-authority-data: c29tZS1vdGhlci1mYWtlLWNlcnRpZmljYXRlLWF1dGhvcml0eS1kYXRhLXZhbHVl # some-other-fake-certificate-authority-data-value
server: https://some-other-fake-server-url-value
name: some-other-cluster
contexts:
- context:
cluster: kind-kind
user: kind-kind
name: kind-kind
cluster: kind-cluster
user: kind-user
name: kind-context
- context:
cluster: some-other-cluster
user: some-other-user
name: some-other-context
current-context: kind-kind
- context:
cluster: invalid-cluster
user: some-other-user
name: invalid-context-no-such-cluster
- context:
cluster: some-other-cluster
user: invalid-user
name: invalid-context-no-such-user
current-context: kind-context
kind: Config
preferences: {}
users:
- name: kind-kind
- name: kind-user
user:
client-certificate-data: ZmFrZS1jbGllbnQtY2VydGlmaWNhdGUtZGF0YS12YWx1ZQ== # fake-client-certificate-data-value
client-key-data: ZmFrZS1jbGllbnQta2V5LWRhdGEtdmFsdWU= # fake-client-key-data-value
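The new fixture entries (`invalid-context-no-such-cluster`, `invalid-context-no-such-user`) exist so the kubeconfig command's getCurrentContext validation has something to trip over. A small sketch of that lookup against a file shaped like this fixture, using client-go's clientcmd loader; the path and printed values are illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a kubeconfig file and resolve its current (or overridden) context,
	// then confirm the referenced cluster and user entries actually exist,
	// which is the same check getCurrentContext performs in the diff above.
	cfg, err := clientcmd.LoadFromFile("cmd/pinniped/cmd/testdata/kubeconfig.yaml")
	if err != nil {
		panic(err)
	}
	ctx := cfg.Contexts[cfg.CurrentContext] // e.g. "kind-context" in this fixture
	if ctx == nil {
		panic("no such context")
	}
	_, clusterOK := cfg.Clusters[ctx.Cluster]
	_, userOK := cfg.AuthInfos[ctx.AuthInfo]
	fmt.Println(cfg.CurrentContext, clusterOK, userOK) // kind-context true true
}
```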
@@ -53,7 +53,7 @@ func TestWhoami(t *testing.T) {
wantStdout: here.Doc(`
Current cluster info:

Name: kind-kind
Name: kind-cluster
URL: https://fake-server-url-value

Current user info:
@@ -68,7 +68,7 @@ func TestWhoami(t *testing.T) {
wantStdout: here.Doc(`
Current cluster info:

Name: kind-kind
Name: kind-cluster
URL: https://fake-server-url-value

Current user info:
@@ -84,7 +84,7 @@ func TestWhoami(t *testing.T) {
wantStdout: here.Doc(`
Current cluster info:

Name: kind-kind
Name: kind-cluster
URL: https://fake-server-url-value

Current user info:
@@ -100,7 +100,7 @@ func TestWhoami(t *testing.T) {
wantStdout: here.Doc(`
Current cluster info:

Name: kind-kind
Name: kind-cluster
URL: https://fake-server-url-value

Current user info:
@@ -209,12 +209,12 @@ func TestWhoami(t *testing.T) {
name: "different kubeconfig context, but same as current",
args: []string{
"--kubeconfig", "./testdata/kubeconfig.yaml",
"--kubeconfig-context", "kind-kind",
"--kubeconfig-context", "kind-context",
},
wantStdout: here.Doc(`
Current cluster info:

Name: kind-kind
Name: kind-cluster
URL: https://fake-server-url-value

Current user info:
@@ -22,6 +22,13 @@ metadata:
labels: #@ labels()
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ defaultResourceNameWithSuffix("kube-cert-agent")
namespace: #@ namespace()
labels: #@ labels()
---
apiVersion: v1
kind: ConfigMap
metadata:
name: #@ defaultResourceNameWithSuffix("config")
@@ -47,6 +54,7 @@ data:
impersonationTLSCertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-tls-serving-certificate") @)
impersonationCACertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-ca-certificate") @)
impersonationSignerSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-signer-ca-certificate") @)
agentServiceAccount: (@= defaultResourceNameWithSuffix("kube-cert-agent") @)
labels: (@= json.encode(labels()).rstrip() @)
kubeCertAgent:
namePrefix: (@= defaultResourceNameWithSuffix("kube-cert-agent-") @)
@@ -24,9 +24,6 @@ rules:
- apiGroups: [ flowcontrol.apiserver.k8s.io ]
resources: [ flowschemas, prioritylevelconfigurations ]
verbs: [ get, list, watch ]
- apiGroups: [ policy ]
resources: [ podsecuritypolicies ]
verbs: [ use ]
- apiGroups: [ security.openshift.io ]
resources: [ securitycontextconstraints ]
verbs: [ use ]
@@ -47,7 +44,7 @@ rules:
- apiGroups:
- #@ pinnipedDevAPIGroupWithPrefix("config.concierge")
resources: [ credentialissuers/status ]
verbs: [get, patch, update]
verbs: [ get, patch, update ]
- apiGroups:
- #@ pinnipedDevAPIGroupWithPrefix("authentication.concierge")
resources: [ jwtauthenticators, webhookauthenticators ]
@@ -67,6 +64,34 @@ roleRef:
name: #@ defaultResourceNameWithSuffix("aggregated-api-server")
apiGroup: rbac.authorization.k8s.io

#! Give permission to the kube-cert-agent Pod to run privileged.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: #@ defaultResourceNameWithSuffix("kube-cert-agent")
namespace: #@ namespace()
labels: #@ labels()
rules:
- apiGroups: [ policy ]
resources: [ podsecuritypolicies ]
verbs: [ use ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: #@ defaultResourceNameWithSuffix("kube-cert-agent")
namespace: #@ namespace()
labels: #@ labels()
subjects:
- kind: ServiceAccount
name: #@ defaultResourceNameWithSuffix("kube-cert-agent")
namespace: #@ namespace()
roleRef:
kind: Role
name: #@ defaultResourceNameWithSuffix("kube-cert-agent")
apiGroup: rbac.authorization.k8s.io

#! Give permission to various objects within the app's own namespace
---
apiVersion: rbac.authorization.k8s.io/v1
@@ -82,16 +107,25 @@ rules:
- apiGroups: [ "" ]
resources: [ secrets ]
verbs: [ create, get, list, patch, update, watch, delete ]
#! We need to be able to CRUD pods in our namespace so we can reconcile the kube-cert-agent pods.
#! We need to be able to watch pods in our namespace so we can find the kube-cert-agent pods.
- apiGroups: [ "" ]
resources: [ pods ]
verbs: [ create, get, list, patch, update, watch, delete ]
verbs: [ get, list, watch ]
#! We need to be able to exec into pods in our namespace so we can grab the API server's private key
- apiGroups: [ "" ]
resources: [ pods/exec ]
verbs: [ create ]
#! We need to be able to delete pods in our namespace so we can clean up legacy kube-cert-agent pods.
- apiGroups: [ "" ]
resources: [ pods ]
verbs: [ delete ]
#! We need to be able to create and update deployments in our namespace so we can manage the kube-cert-agent Deployment.
- apiGroups: [ apps ]
resources: [ replicasets,deployments ]
resources: [ deployments ]
verbs: [ create, get, list, patch, update, watch ]
#! We need to be able to get replicasets so we can form the correct owner references on our generated objects.
- apiGroups: [ apps ]
resources: [ replicasets ]
verbs: [ get ]
- apiGroups: [ "" ]
resources: [ configmaps ]
go.mod (18 lines changed)
@@ -26,20 +26,20 @@ require (
github.com/spf13/cobra v1.1.3
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.7.0
golang.org/x/crypto v0.0.0-20201217014255-9d1352758620
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
gopkg.in/square/go-jose.v2 v2.5.1
k8s.io/api v0.0.0-20210329192759-4cbcd86ea749
k8s.io/apimachinery v0.21.0-alpha.0.0.20210329192153-640a6275d2b0
k8s.io/apiserver v0.0.0-20210330222258-23775f4efbdf
k8s.io/client-go v0.0.0-20210329194426-720ea497dc06
k8s.io/component-base v0.0.0-20210329195309-e1576f54c4ca
k8s.io/gengo v0.0.0-20201113003025-83324d819ded
k8s.io/api v0.21.0
k8s.io/apimachinery v0.21.0
k8s.io/apiserver v0.21.0
k8s.io/client-go v0.21.0
k8s.io/component-base v0.21.0
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027
k8s.io/klog/v2 v2.8.0
k8s.io/kube-aggregator v0.0.0-20210329201137-c9d5b747f33b
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd
k8s.io/kube-aggregator v0.21.0
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7
k8s.io/utils v0.0.0-20201110183641-67b214c5f920
sigs.k8s.io/yaml v1.2.0
)
go.sum (89 lines changed)
@@ -35,14 +35,12 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.1 h1:eVvIXUKiTgv++6YnWb42DUA1YL7qDugnKP0HljexdnQ=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest v0.11.12 h1:gI8ytXbxMfI+IVbI9mP2JGCTXIuhHLgRlvQ9X4PsnHE=
github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
@@ -60,8 +58,9 @@ github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF0
github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -147,6 +146,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -161,8 +161,6 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v0.0.0-20180713052910-9f541cc9db5d/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -199,7 +197,6 @@ github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -224,6 +221,7 @@ github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL9
github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ=
github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
@@ -667,7 +665,6 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -756,7 +753,9 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg=
github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -945,8 +944,9 @@ github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
|
||||
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
@@ -1097,8 +1097,8 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20201217014255-9d1352758620 h1:3wPMTskHO3+O6jqTEXyFcsnuxMQOqYSaHsDxcbUXpqA=
|
||||
golang.org/x/crypto v0.0.0-20201217014255-9d1352758620/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1134,8 +1134,9 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 h1:xUIPaMhvROX9dhPvRCenIJtU78+lbEenGbgqB5hfHCQ=
|
||||
golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180816102801-aaf60122140d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -1182,8 +1183,8 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
@@ -1270,13 +1271,16 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 h1:8qxJSnu+7dRq6upnbntrmriWByIakBuct5OM/MdQC1M=
|
||||
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1289,8 +1293,8 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -1370,8 +1374,9 @@ golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roY
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200721223218-6123e77877b2/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -1512,6 +1517,7 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclp
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
@@ -1519,31 +1525,28 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
|
||||
k8s.io/api v0.0.0-20210329192759-4cbcd86ea749 h1:FhxDmQX91mo1Xsh6quWX769L65eJPP8kNMTeIw6d6po=
|
||||
k8s.io/api v0.0.0-20210329192759-4cbcd86ea749/go.mod h1:cgILOv2D1tPmboo8I/DGUMNkV0C3JtN5aAEYvcTyKQc=
|
||||
k8s.io/apimachinery v0.0.0-20210329192153-640a6275d2b0/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
|
||||
k8s.io/apimachinery v0.21.0-alpha.0.0.20210329192153-640a6275d2b0 h1:ikCWZMv/0K5O7JrQzU2r3UwSfas+7jPk/ADWV+SYPFk=
|
||||
k8s.io/apimachinery v0.21.0-alpha.0.0.20210329192153-640a6275d2b0/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
|
||||
k8s.io/apiserver v0.0.0-20210329200458-395cee214d8e/go.mod h1:OORuX3x2IIO92Bj0kYiJ5RUkxHQHRbh0zT/dc9SXxdM=
|
||||
k8s.io/apiserver v0.0.0-20210330222258-23775f4efbdf h1:g9BX+PSd1dGlB3IsRrvN1ntgHWeYsqewoLUB5+PqyFk=
|
||||
k8s.io/apiserver v0.0.0-20210330222258-23775f4efbdf/go.mod h1:OORuX3x2IIO92Bj0kYiJ5RUkxHQHRbh0zT/dc9SXxdM=
|
||||
k8s.io/client-go v0.0.0-20210329194426-720ea497dc06 h1:ig00mgkRCia3Lfr2l1uHTESYtuRFEV6Fl1NWWwvfd6E=
|
||||
k8s.io/client-go v0.0.0-20210329194426-720ea497dc06/go.mod h1:1C1ztLCJQP6JaCIcN/gJ4tjQI5EDBsuD3fQ6wQCY17I=
|
||||
k8s.io/code-generator v0.0.0-20210329191617-48c1e31cd8b3/go.mod h1:i6FmG+QxaLxvJsezvZp0q/gAEzzOz3U53KFibghWToU=
|
||||
k8s.io/component-base v0.0.0-20210329195309-e1576f54c4ca h1:vXQs8TtiZCMlRO1AfYgSIgvoHKvqjP72WO90rzu3JNg=
|
||||
k8s.io/component-base v0.0.0-20210329195309-e1576f54c4ca/go.mod h1:0GA/S/qw95GXEDv164YZl1I0s52DVkqcWMi9gskopDo=
|
||||
k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y=
|
||||
k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU=
|
||||
k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA=
|
||||
k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
|
||||
k8s.io/apiserver v0.21.0 h1:1hWMfsz+cXxB77k6/y0XxWxwl6l9OF26PC9QneUVn1Q=
|
||||
k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg=
|
||||
k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag=
|
||||
k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA=
|
||||
k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q=
|
||||
k8s.io/component-base v0.21.0 h1:tLLGp4BBjQaCpS/KiuWh7m2xqvAdsxLm4ATxHSe5Zpg=
|
||||
k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw=
|
||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20201113003025-83324d819ded h1:JApXBKYyB7l9xx+DK7/+mFjC7A9Bt5A93FPvFD0HIFE=
|
||||
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw=
|
||||
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts=
|
||||
k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
|
||||
k8s.io/kube-aggregator v0.0.0-20210329201137-c9d5b747f33b h1:HDslh4s73TWDjm0BxVMOXtut7Q/o9Sz+xHSf2nmR8uw=
|
||||
k8s.io/kube-aggregator v0.0.0-20210329201137-c9d5b747f33b/go.mod h1:DUuPy83ZThcPzC5SGxS3t67M5VDd85+GyCOKhvIxbqA=
|
||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=
|
||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
|
||||
k8s.io/kube-aggregator v0.21.0 h1:my2WYu8RJcj/ZzWAjPPnmxNRELk/iCdPjMaOmsZOeBU=
|
||||
k8s.io/kube-aggregator v0.21.0/go.mod h1:sIaa9L4QCBo9gjPyoGJns4cBjYVLq3s49FxF7m/1A0A=
|
||||
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0=
|
||||
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
|
||||
@@ -1558,8 +1561,8 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.3 h1:4oyYo8NREp49LBBhKxEqCulFjg26rawYKrnCmg+Sr6c=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
hack/integration-test-env-goland.sh (new executable file, 22 lines)

@@ -0,0 +1,22 @@
#!/usr/bin/env bash

# Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

#
# Print the PINNIPED_TEST_* env vars from /tmp/integration-test-env in a format that can be used in GoLand.
#

set -euo pipefail

ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"

source /tmp/integration-test-env

printenv | grep PINNIPED_TEST_ | sed 's/=.*//g' | grep -v CLUSTER_CAPABILITY_YAML | while read -r var ; do
  echo -n "${var}="
  echo -n "${!var}" | tr -d '\n'
  echo -n ";"
done

echo -n "PINNIPED_TEST_CLUSTER_CAPABILITY_FILE=${ROOT}/test/cluster_capabilities/kind.yaml"
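A note for orientation rather than part of the diff: the helper above assumes /tmp/integration-test-env already exists, so a typical local flow is to run hack/prepare-for-integration-tests.sh (shown later in this diff) first and then pipe this helper's output to the clipboard for GoLand. The pbcopy step is macOS-specific and only one possible way to do the copy.

```bash
# Sketch of a typical invocation; assumes a local kind cluster and that the prep script has already run.
./hack/prepare-for-integration-tests.sh          # writes /tmp/integration-test-env
./hack/integration-test-env-goland.sh | pbcopy   # paste into GoLand's run configuration "Environment" field
```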
@@ -1,203 +0,0 @@
|
||||
load('ext://restart_process', 'docker_build_with_restart')
|
||||
disable_snapshots()
|
||||
analytics_settings(False)
|
||||
update_settings(max_parallel_updates=8)
|
||||
os.putenv('CGO_ENABLED', '0')
|
||||
os.putenv('GOOS', 'linux')
|
||||
os.putenv('GOARCH', 'amd64')
|
||||
os.putenv('CGO_ENABLED', '0')
|
||||
os.putenv('KUBE_GIT_VERSION', 'v0.0.0')
|
||||
|
||||
#####################################################################################################
|
||||
# Compile all of our ./cmd/... binaries.
|
||||
#
|
||||
|
||||
local_resource(
|
||||
'compile',
|
||||
'cd ../../../ && mkdir -p ./hack/lib/tilt/build && go build -v -ldflags "$(hack/get-ldflags.sh)" -o ./hack/lib/tilt/build ./cmd/...',
|
||||
deps=['../../../cmd', '../../../internal', '../../../pkg', '../../../generated'],
|
||||
)
|
||||
|
||||
#####################################################################################################
|
||||
# Test IDP (Dex + cert generation + squid proxy)
|
||||
#
|
||||
|
||||
# Render the IDP installation manifest using ytt.
|
||||
k8s_yaml(local(['ytt',
|
||||
'--file', '../../../test/deploy/dex',
|
||||
'--data-value-yaml', 'supervisor_redirect_uris=[https://pinniped-supervisor-clusterip.supervisor.svc.cluster.local/some/path/callback]',
|
||||
]))
|
||||
# Tell tilt to watch all of those files for changes.
|
||||
watch_file('../../../test/deploy/dex')
|
||||
|
||||
k8s_resource(objects=['dex:namespace'], new_name='dex-ns')
|
||||
k8s_resource(workload='cert-issuer', resource_deps=['dex-ns'], objects=[
|
||||
'cert-issuer:serviceaccount',
|
||||
'cert-issuer:role',
|
||||
'cert-issuer:rolebinding',
|
||||
])
|
||||
k8s_resource(workload='proxy', resource_deps=['dex-ns'])
|
||||
k8s_resource(workload='dex', resource_deps=['dex-ns', 'cert-issuer'], objects=[
|
||||
'dex-config:configmap',
|
||||
])
|
||||
|
||||
|
||||
#####################################################################################################
|
||||
# Local-user-authenticator app
|
||||
#
|
||||
|
||||
# Build a container image for local-user-authenticator, with live-update enabled.
|
||||
docker_build_with_restart('image/local-user-auth', '.',
|
||||
dockerfile='local-user-authenticator.Dockerfile',
|
||||
entrypoint=['/usr/local/bin/local-user-authenticator'],
|
||||
live_update=[sync('./build/local-user-authenticator', '/usr/local/bin/local-user-authenticator')],
|
||||
only=['./build/local-user-authenticator'],
|
||||
)
|
||||
|
||||
# Render the local-user-authenticator installation manifest using ytt.
|
||||
k8s_yaml(local([
|
||||
'ytt',
|
||||
'--file', '../../../deploy/local-user-authenticator',
|
||||
'--data-value', 'image_repo=image/local-user-auth',
|
||||
'--data-value', 'image_tag=tilt-dev',
|
||||
'--data-value-yaml', 'run_as_user=0',
|
||||
'--data-value-yaml', 'run_as_group=0',
|
||||
]))
|
||||
# Tell tilt to watch all of those files for changes.
|
||||
watch_file('../../../deploy/local-user-authenticator')
|
||||
|
||||
# Collect all the deployed local-user-authenticator resources under a "local-user-auth" resource tab.
|
||||
k8s_resource(
|
||||
workload='local-user-authenticator', # this is the deployment name
|
||||
new_name='local-user-auth', # this is the name that will appear in the tilt UI
|
||||
objects=[
|
||||
# these are the objects that would otherwise appear in the "uncategorized" tab in the tilt UI
|
||||
'local-user-authenticator:namespace',
|
||||
'local-user-authenticator:serviceaccount',
|
||||
'local-user-authenticator:role',
|
||||
'local-user-authenticator:rolebinding',
|
||||
],
|
||||
)
|
||||
|
||||
#####################################################################################################
|
||||
# Supervisor app
|
||||
#
|
||||
|
||||
# Build a container image for supervisor, with live-update enabled.
|
||||
docker_build_with_restart('image/supervisor', '.',
|
||||
dockerfile='supervisor.Dockerfile',
|
||||
entrypoint=['/usr/local/bin/pinniped-supervisor'],
|
||||
live_update=[sync('./build/pinniped-supervisor', '/usr/local/bin/pinniped-supervisor')],
|
||||
only=['./build/pinniped-supervisor'],
|
||||
)
|
||||
|
||||
# Render the supervisor installation manifest using ytt.
|
||||
#
|
||||
# 31234 and 31243 are the same port numbers hardcoded in the port forwarding of our kind configuration.
|
||||
# Don't think that you can just change this!
|
||||
k8s_yaml(local([
|
||||
'ytt',
|
||||
'--file', '../../../deploy/supervisor',
|
||||
'--data-value', 'app_name=pinniped-supervisor',
|
||||
'--data-value', 'namespace=supervisor',
|
||||
'--data-value', 'image_repo=image/supervisor',
|
||||
'--data-value', 'image_tag=tilt-dev',
|
||||
'--data-value', 'log_level=debug',
|
||||
'--data-value-yaml', 'replicas=1',
|
||||
'--data-value-yaml', 'service_http_nodeport_port=80',
|
||||
'--data-value-yaml', 'service_http_nodeport_nodeport=31234',
|
||||
'--data-value-yaml', 'service_https_nodeport_port=443',
|
||||
'--data-value-yaml', 'service_https_nodeport_nodeport=31243',
|
||||
'--data-value-yaml', 'service_https_clusterip_port=443',
|
||||
'--data-value-yaml', 'custom_labels={mySupervisorCustomLabelName: mySupervisorCustomLabelValue}',
|
||||
'--data-value-yaml', 'run_as_user=0',
|
||||
'--data-value-yaml', 'run_as_group=0',
|
||||
]))
|
||||
# Tell tilt to watch all of those files for changes.
|
||||
watch_file('../../../deploy/supervisor')
|
||||
|
||||
# Collect all the deployed supervisor resources under a "supervisor" resource tab.
|
||||
k8s_resource(
|
||||
workload='pinniped-supervisor', # this is the deployment name
|
||||
new_name='supervisor', # this is the name that will appear in the tilt UI
|
||||
objects=[
|
||||
# these are the objects that would otherwise appear in the "uncategorized" tab in the tilt UI
|
||||
'federationdomains.config.supervisor.pinniped.dev:customresourcedefinition',
|
||||
'oidcidentityproviders.idp.supervisor.pinniped.dev:customresourcedefinition',
|
||||
'pinniped-supervisor-static-config:configmap',
|
||||
'supervisor:namespace',
|
||||
'pinniped-supervisor:role',
|
||||
'pinniped-supervisor:rolebinding',
|
||||
'pinniped-supervisor:serviceaccount',
|
||||
],
|
||||
)
|
||||
|
||||
# Build a container image for the Concierge server, with live-update enabled.
|
||||
docker_build_with_restart('image/concierge', '.',
|
||||
dockerfile='concierge.Dockerfile',
|
||||
entrypoint=['/usr/local/bin/pinniped-concierge'],
|
||||
live_update=[sync('./build/pinniped-concierge', '/usr/local/bin/pinniped-concierge')],
|
||||
only=['./build/pinniped-concierge'],
|
||||
)
|
||||
|
||||
#####################################################################################################
|
||||
# Concierge app
|
||||
#
|
||||
|
||||
# Render the Concierge server installation manifest using ytt.
|
||||
k8s_yaml(local([
|
||||
'sh', '-c',
|
||||
'ytt --file ../../../deploy/concierge ' +
|
||||
'--data-value app_name=pinniped-concierge ' +
|
||||
'--data-value namespace=concierge ' +
|
||||
'--data-value image_repo=image/concierge ' +
|
||||
'--data-value image_tag=tilt-dev ' +
|
||||
'--data-value kube_cert_agent_image=debian:10.8-slim ' +
|
||||
'--data-value discovery_url=$(TERM=dumb kubectl cluster-info | awk \'/master|control plane/ {print $NF}\') ' +
|
||||
'--data-value log_level=debug ' +
|
||||
'--data-value-yaml replicas=1 ' +
|
||||
'--data-value-yaml "custom_labels={myConciergeCustomLabelName: myConciergeCustomLabelValue}" ' +
|
||||
'--data-value-yaml run_as_user=0 ' +
|
||||
'--data-value-yaml run_as_group=0',
|
||||
]))
|
||||
# Tell tilt to watch all of those files for changes.
|
||||
watch_file('../../../deploy/concierge')
|
||||
|
||||
# Collect all the deployed local-user-authenticator resources under a "concierge" resource tab.
|
||||
k8s_resource(
|
||||
workload='pinniped-concierge', # this is the deployment name
|
||||
new_name='concierge', # this is the name that will appear in the tilt UI
|
||||
objects=[
|
||||
# these are the objects that would otherwise appear in the "uncategorized" tab in the tilt UI
|
||||
'concierge:namespace',
|
||||
'pinniped-concierge-aggregated-api-server:clusterrole',
|
||||
'pinniped-concierge-aggregated-api-server:clusterrolebinding',
|
||||
'pinniped-concierge-aggregated-api-server:role',
|
||||
'pinniped-concierge-aggregated-api-server:rolebinding',
|
||||
'pinniped-concierge-cluster-info-lister-watcher:role',
|
||||
'pinniped-concierge-cluster-info-lister-watcher:rolebinding',
|
||||
'pinniped-concierge-config:configmap',
|
||||
'pinniped-concierge-create-token-credential-requests:clusterrole',
|
||||
'pinniped-concierge-create-token-credential-requests:clusterrolebinding',
|
||||
'pinniped-concierge-extension-apiserver-authentication-reader:rolebinding',
|
||||
'pinniped-concierge-kube-system-pod-read:role',
|
||||
'pinniped-concierge-kube-system-pod-read:rolebinding',
|
||||
'pinniped-concierge:clusterrolebinding',
|
||||
'pinniped-concierge:serviceaccount',
|
||||
'credentialissuers.config.concierge.pinniped.dev:customresourcedefinition',
|
||||
'webhookauthenticators.authentication.concierge.pinniped.dev:customresourcedefinition',
|
||||
'v1alpha1.login.concierge.pinniped.dev:apiservice',
|
||||
],
|
||||
)
|
||||
|
||||
#####################################################################################################
|
||||
# Finish setting up cluster and creating integration test env file
|
||||
#
|
||||
|
||||
# Collect environment variables needed to run our integration test suite.
|
||||
local_resource(
|
||||
'test-env',
|
||||
'TILT_MODE=yes ../../prepare-for-integration-tests.sh',
|
||||
resource_deps=['local-user-auth', 'concierge', 'supervisor', 'dex', 'proxy'],
|
||||
deps=['../../prepare-for-integration-tests.sh'],
|
||||
)
|
||||
@@ -1,19 +0,0 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0

# Use a runtime image based on Debian slim
FROM debian:10.8-slim

# Copy the binary which was built outside the container.
COPY build/pinniped-concierge /usr/local/bin/pinniped-concierge

# Document the port
EXPOSE 8443

# Run as non-root for security posture
# Commented out because it breaks the live-reload feature of Tilt. See https://github.com/tilt-dev/tilt/issues/2300
# Be aware that this creates a significant difference between running with Tilt and running otherwise.
#USER 1001:1001

# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/pinniped-concierge"]
@@ -1,19 +0,0 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0

# Use a runtime image based on Debian slim
FROM debian:10.8-slim

# Copy the binary which was built outside the container.
COPY build/local-user-authenticator /usr/local/bin/local-user-authenticator

# Document the port
EXPOSE 8443

# Run as non-root for security posture
# Commented out because it breaks the live-reload feature of Tilt. See https://github.com/tilt-dev/tilt/issues/2300
# Be aware that this creates a significant difference between running with Tilt and running otherwise.
#USER 1001:1001

# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/local-user-authenticator"]
@@ -1,21 +0,0 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0

# Use a runtime image based on Debian slim
FROM debian:10.8-slim

RUN apt-get update && apt-get install -y ca-certificates && rm -rf /var/lib/apt/lists/*

# Copy the binary which was built outside the container.
COPY build/pinniped-supervisor /usr/local/bin/pinniped-supervisor

# Document the port
EXPOSE 8080 8443

# Run as non-root for security posture
# Commented out because it breaks the live-reload feature of Tilt. See https://github.com/tilt-dev/tilt/issues/2300
# Be aware that this creates a significant difference between running with Tilt and running otherwise.
#USER 1001:1001

# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/pinniped-supervisor"]
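The three Dockerfiles above existed only for the Tilt workflow: the Tiltfile compiled the binaries outside the container into hack/lib/tilt/build and these images simply copied them in. As a rough, hand-run sketch of what that amounted to (paths taken from the Tiltfile earlier in this diff, the image name is illustrative, and the real compile step also passed -ldflags from hack/get-ldflags.sh):

```bash
# Rough, illustrative equivalent of the Tiltfile's compile step plus concierge.Dockerfile, run from the repo root.
mkdir -p ./hack/lib/tilt/build
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./hack/lib/tilt/build ./cmd/...
docker build -f hack/lib/tilt/concierge.Dockerfile -t image/concierge hack/lib/tilt
```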
@@ -1,44 +0,0 @@
def docker_build_sub(ref, context, extra_cmds, child_context=None, base_suffix='-tilt_docker_build_sub_base', live_update=[], **kwargs):
    """
    Substitutes in a docker image with extra Dockerfile commands.

    This allows you to easily customize your docker build for your dev environment without changing your prod Dockerfile.

    This works by:
    1. Renaming the original image to, e.g. "myimage-base"
    2. Creating a new image named, e.g. "myimage" that starts with "FROM myimage-base"
    3. Adding whatever extra stuff you want

    Examples:
    ```
    # load the extension
    load("ext://docker_build_sub", "docker_build_sub")

    # ensure you have vim installed when running in dev, so you can
    # shell into the box and look at files
    docker_build_sub('myimage', '.', extra_cmds=["apt-get install vim"])

    # use live_update to sync files from outside your docker context
    docker_build_sub('foo', 'foo', child_context='bar',
        extra_cmds=['ADD . /bar'],
        live_update=[
            sync('foo', '/foo'),
            sync('bar', '/bar'),
        ]
    )
    ```

    This function supports all the normal `docker_build` arguments. See [docker_build API docs](https://docs.tilt.dev/api.html#api.docker_build) for arguments not mentioned here.

    Args:
      context (str): The directory in which to build the parent (original) image. If child_context is not set, also the directory in which to build the new child image.
      extra_cmds (List[str]): Any extra Dockerfile commands you want to run when building the image.
      child_context (str): The directory in which to build the new child image. If unset (None), defaults to the parent image's context.
      base_suffix (str): The suffix to append to the parent (original) image's name so that the new child image can take the original name. This is mostly ignorable, and just here in case the default generates a conflict for you.
    """
    if not child_context:
        child_context = context
    base_ref = '%s-base' % ref
    docker_build(base_ref, context, **kwargs)
    df = '\n'.join(['FROM %s' % base_ref] + extra_cmds)
    docker_build(ref, child_context, dockerfile_contents=df, live_update=live_update, **kwargs)
@@ -1,16 +0,0 @@
{
  "Extensions": [
    {
      "Name": "restart_process",
      "GitCommitHash": "b8df6f5f3368ced855da56e002027a3bd1a61bdf",
      "ExtensionRegistry": "https://github.com/tilt-dev/tilt-extensions",
      "TimeFetched": "2020-09-03T23:04:40.167635-05:00"
    },
    {
      "Name": "docker_build_sub",
      "GitCommitHash": "b8df6f5f3368ced855da56e002027a3bd1a61bdf",
      "ExtensionRegistry": "https://github.com/tilt-dev/tilt-extensions",
      "TimeFetched": "2020-09-04T18:01:24.795509-05:00"
    }
  ]
}
@@ -1,78 +0,0 @@
RESTART_FILE = '/.restart-proc'
TYPE_RESTART_CONTAINER_STEP = 'live_update_restart_container_step'

KWARGS_BLACKLIST = [
    # since we'll be passing `dockerfile_contents` when building the
    # child image, remove any kwargs that might conflict
    'dockerfile', 'dockerfile_contents',

    # 'target' isn't relevant to our child build--if we pass this arg,
    # Docker will just fail to find the specified stage and error out
    'target',
]

def docker_build_with_restart(ref, context, entrypoint, live_update,
                              base_suffix='-tilt_docker_build_with_restart_base', restart_file=RESTART_FILE, **kwargs):
    """Wrap a docker_build call and its associated live_update steps so that the last step
    of any live update is to rerun the given entrypoint.

    Args:
      ref: name for this image (e.g. 'myproj/backend' or 'myregistry/myproj/backend'); as the parameter of the same name in docker_build
      context: path to use as the Docker build context; as the parameter of the same name in docker_build
      entrypoint: the command to be (re-)executed when the container starts or when a live_update is run
      live_update: set of steps for updating a running container; as the parameter of the same name in docker_build
      base_suffix: suffix for naming the base image, applied as {ref}{base_suffix}
      restart_file: file that Tilt will update during a live_update to signal the entrypoint to rerun
      **kwargs: will be passed to the underlying `docker_build` call
    """

    # first, validate the given live_update steps
    if len(live_update) == 0:
        fail("`docker_build_with_restart` requires at least one live_update step")
    for step in live_update:
        if type(step) == TYPE_RESTART_CONTAINER_STEP:
            fail("`docker_build_with_restart` is not compatible with live_update step: "+
                 "`restart_container()` (this extension is meant to REPLACE restart_container() )")

    # rename the original image to make it a base image and declare a docker_build for it
    base_ref = '{}{}'.format(ref, base_suffix)
    docker_build(base_ref, context, **kwargs)

    # declare a new docker build that adds a static binary of tilt-restart-wrapper
    # (which makes use of `entr` to watch files and restart processes) to the user's image
    df = '''
FROM tiltdev/restart-helper:2020-07-16 as restart-helper

FROM {}
USER root
RUN ["touch", "{}"]
COPY --from=restart-helper /tilt-restart-wrapper /
COPY --from=restart-helper /entr /
'''.format(base_ref, restart_file)

    # Clean kwargs for building the child image (which builds on user's specified
    # image and copies in Tilt's restart wrapper). In practice, this means removing
    # kwargs that were relevant to building the user's specified image but are NOT
    # relevant to building the child image / may conflict with args we specifically
    # pass for the child image.
    cleaned_kwargs = {k: v for k, v in kwargs.items() if k not in KWARGS_BLACKLIST}

    # Change the entrypoint to use `tilt-restart-wrapper`.
    # `tilt-restart-wrapper` makes use of `entr` (https://github.com/eradman/entr/) to
    # re-execute $entrypoint whenever $restart_file changes
    if type(entrypoint) == type(""):
        entrypoint_with_entr = ["/tilt-restart-wrapper", "--watch_file={}".format(restart_file), "sh", "-c", entrypoint]
    elif type(entrypoint) == type([]):
        entrypoint_with_entr = ["/tilt-restart-wrapper", "--watch_file={}".format(restart_file)] + entrypoint
    else:
        fail("`entrypoint` must be a string or list of strings: got {}".format(type(entrypoint)))

    # last live_update step should always be to modify $restart_file, which
    # triggers the process wrapper to rerun $entrypoint
    # NB: write `date` instead of just `touch`ing because `entr` doesn't respond
    # to timestamp changes, only writes (see https://github.com/eradman/entr/issues/32)
    live_update = live_update + [run('date > {}'.format(restart_file))]

    docker_build(ref, context, entrypoint=entrypoint_with_entr, dockerfile_contents=df,
                 live_update=live_update, **cleaned_kwargs)
@@ -14,14 +14,6 @@ set -euo pipefail
|
||||
#
|
||||
# Helper functions
|
||||
#
|
||||
TILT_MODE=${TILT_MODE:-no}
|
||||
function tilt_mode() {
|
||||
if [[ "$TILT_MODE" == "yes" ]]; then
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
function log_note() {
|
||||
GREEN='\033[0;32m'
|
||||
NC='\033[0m'
|
||||
@@ -57,6 +49,7 @@ help=no
|
||||
skip_build=no
|
||||
clean_kind=no
|
||||
api_group_suffix="pinniped.dev" # same default as in the values.yaml ytt file
|
||||
skip_chromedriver_check=no
|
||||
|
||||
while (("$#")); do
|
||||
case "$1" in
|
||||
@@ -82,6 +75,10 @@ while (("$#")); do
|
||||
api_group_suffix=$1
|
||||
shift
|
||||
;;
|
||||
--live-dangerously)
|
||||
skip_chromedriver_check=yes
|
||||
shift
|
||||
;;
|
||||
-*)
|
||||
log_error "Unsupported flag $1" >&2
|
||||
exit 1
|
||||
@@ -123,18 +120,21 @@ check_dependency chromedriver "Please install chromedriver. e.g. 'brew install c
|
||||
|
||||
# Check that Chrome and chromedriver versions match. If chromedriver falls a couple versions behind
|
||||
# then usually tests start to fail with strange error messages.
|
||||
if [[ "$OSTYPE" == "darwin"* ]]; then
|
||||
chrome_version=$(/Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --version | cut -d ' ' -f3 | cut -d '.' -f1)
|
||||
else
|
||||
chrome_version=$(google-chrome --version | cut -d ' ' -f3 | cut -d '.' -f1)
|
||||
fi
|
||||
chromedriver_version=$(chromedriver --version | cut -d ' ' -f2 | cut -d '.' -f1)
|
||||
if [[ "$chrome_version" != "$chromedriver_version" ]]; then
|
||||
log_error "It appears that you are using Chrome $chrome_version with chromedriver $chromedriver_version."
|
||||
log_error "Please use the same version of chromedriver as Chrome."
|
||||
log_error "If you are using the latest version of Chrome, then you can upgrade"
|
||||
log_error "to the latest chromedriver, e.g. 'brew upgrade chromedriver' on MacOS."
|
||||
exit 1
|
||||
if [[ "$skip_chromedriver_check" == "no" ]]; then
|
||||
if [[ "$OSTYPE" == "darwin"* ]]; then
|
||||
chrome_version=$(/Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --version | cut -d ' ' -f3 | cut -d '.' -f1)
|
||||
else
|
||||
chrome_version=$(google-chrome --version | cut -d ' ' -f3 | cut -d '.' -f1)
|
||||
fi
|
||||
chromedriver_version=$(chromedriver --version | cut -d ' ' -f2 | cut -d '.' -f1)
|
||||
if [[ "$chrome_version" != "$chromedriver_version" ]]; then
|
||||
log_error "It appears that you are using Chrome $chrome_version with chromedriver $chromedriver_version."
|
||||
log_error "Please use the same version of chromedriver as Chrome."
|
||||
log_error "If you are using the latest version of Chrome, then you can upgrade"
|
||||
log_error "to the latest chromedriver, e.g. 'brew upgrade chromedriver' on MacOS."
|
||||
log_error "Feeling lucky? Add --live-dangerously to skip this check."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Require kubectl >= 1.18.x
|
||||
@@ -143,94 +143,94 @@ if [ "$(kubectl version --client=true --short | cut -d '.' -f 2)" -lt 18 ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! tilt_mode; then
|
||||
if [[ "$clean_kind" == "yes" ]]; then
|
||||
log_note "Deleting running kind cluster to prepare from a clean slate..."
|
||||
./hack/kind-down.sh
|
||||
if [[ "$clean_kind" == "yes" ]]; then
|
||||
log_note "Deleting running kind cluster to prepare from a clean slate..."
|
||||
./hack/kind-down.sh
|
||||
fi
|
||||
|
||||
#
|
||||
# Setup kind and build the app
|
||||
#
|
||||
log_note "Checking for running kind cluster..."
|
||||
if ! kind get clusters | grep -q -e '^pinniped$'; then
|
||||
log_note "Creating a kind cluster..."
|
||||
# Our kind config exposes node port 31234 as 127.0.0.1:12345, 31243 as 127.0.0.1:12344, and 31235 as 127.0.0.1:12346
|
||||
./hack/kind-up.sh
|
||||
else
|
||||
if ! kubectl cluster-info | grep -E '(master|control plane)' | grep -q 127.0.0.1; then
|
||||
log_error "Seems like your kubeconfig is not targeting a local cluster."
|
||||
log_error "Exiting to avoid accidentally running tests against a real cluster."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
#
|
||||
# Setup kind and build the app
|
||||
#
|
||||
log_note "Checking for running kind cluster..."
|
||||
if ! kind get clusters | grep -q -e '^pinniped$'; then
|
||||
log_note "Creating a kind cluster..."
|
||||
# Our kind config exposes node port 31234 as 127.0.0.1:12345, 31243 as 127.0.0.1:12344, and 31235 as 127.0.0.1:12346
|
||||
./hack/kind-up.sh
|
||||
else
|
||||
if ! kubectl cluster-info | grep -E '(master|control plane)' | grep -q 127.0.0.1; then
|
||||
log_error "Seems like your kubeconfig is not targeting a local cluster."
|
||||
log_error "Exiting to avoid accidentally running tests against a real cluster."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
registry="pinniped.local"
|
||||
repo="test/build"
|
||||
registry_repo="$registry/$repo"
|
||||
tag=$(uuidgen) # always a new tag to force K8s to reload the image on redeploy
|
||||
|
||||
if [[ "$skip_build" == "yes" ]]; then
|
||||
most_recent_tag=$(docker images "$registry/$repo" --format "{{.Tag}}" | head -1)
|
||||
if [[ -n "$most_recent_tag" ]]; then
|
||||
tag="$most_recent_tag"
|
||||
do_build=no
|
||||
else
|
||||
# Oops, there was no previous build. Need to build anyway.
|
||||
do_build=yes
|
||||
fi
|
||||
registry="pinniped.local"
|
||||
repo="test/build"
|
||||
registry_repo="$registry/$repo"
|
||||
tag=$(uuidgen) # always a new tag to force K8s to reload the image on redeploy
|
||||
|
||||
if [[ "$skip_build" == "yes" ]]; then
|
||||
most_recent_tag=$(docker images "$registry/$repo" --format "{{.Tag}}" | head -1)
|
||||
if [[ -n "$most_recent_tag" ]]; then
|
||||
tag="$most_recent_tag"
|
||||
do_build=no
|
||||
else
|
||||
# Oops, there was no previous build. Need to build anyway.
|
||||
do_build=yes
|
||||
fi
|
||||
|
||||
registry_repo_tag="${registry_repo}:${tag}"
|
||||
|
||||
if [[ "$do_build" == "yes" ]]; then
|
||||
# Rebuild the code
|
||||
log_note "Docker building the app..."
|
||||
docker build . --tag "$registry_repo_tag"
|
||||
fi
|
||||
|
||||
# Load it into the cluster
|
||||
log_note "Loading the app's container image into the kind cluster..."
|
||||
kind load docker-image "$registry_repo_tag" --name pinniped
|
||||
|
||||
manifest=/tmp/manifest.yaml
|
||||
|
||||
#
|
||||
# Deploy local-user-authenticator
|
||||
#
|
||||
pushd deploy/local-user-authenticator >/dev/null
|
||||
|
||||
log_note "Deploying the local-user-authenticator app to the cluster..."
|
||||
ytt --file . \
|
||||
--data-value "image_repo=$registry_repo" \
|
||||
--data-value "image_tag=$tag" >"$manifest"
|
||||
|
||||
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
|
||||
kapp deploy --yes --app local-user-authenticator --diff-changes --file "$manifest"
|
||||
|
||||
popd >/dev/null
|
||||
|
||||
#
|
||||
# Deploy dex
|
||||
#
|
||||
dex_test_password="$(openssl rand -hex 16)"
|
||||
pushd test/deploy/dex >/dev/null
|
||||
|
||||
log_note "Deploying Dex to the cluster..."
|
||||
ytt --file . >"$manifest"
|
||||
ytt --file . \
|
||||
--data-value-yaml "supervisor_redirect_uris=[https://pinniped-supervisor-clusterip.supervisor.svc.cluster.local/some/path/callback]" \
|
||||
--data-value "pinny_bcrypt_passwd_hash=$(htpasswd -nbBC 10 x "$dex_test_password" | sed -e "s/^x://")" \
|
||||
>"$manifest"
|
||||
|
||||
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
|
||||
kapp deploy --yes --app dex --diff-changes --file "$manifest"
|
||||
|
||||
popd >/dev/null
|
||||
else
|
||||
do_build=yes
|
||||
fi
|
||||
|
||||
registry_repo_tag="${registry_repo}:${tag}"
|
||||
|
||||
if [[ "$do_build" == "yes" ]]; then
|
||||
# Rebuild the code
|
||||
log_note "Docker building the app..."
|
||||
# DOCKER_BUILDKIT=1 is optional on MacOS but required on linux.
|
||||
DOCKER_BUILDKIT=1 docker build . --tag "$registry_repo_tag"
|
||||
fi
|
||||
|
||||
# Load it into the cluster
|
||||
log_note "Loading the app's container image into the kind cluster..."
|
||||
kind load docker-image "$registry_repo_tag" --name pinniped
|
||||
|
||||
manifest=/tmp/manifest.yaml
|
||||
|
||||
#
|
||||
# Deploy local-user-authenticator
|
||||
#
|
||||
pushd deploy/local-user-authenticator >/dev/null
|
||||
|
||||
log_note "Deploying the local-user-authenticator app to the cluster..."
|
||||
ytt --file . \
|
||||
--data-value "image_repo=$registry_repo" \
|
||||
--data-value "image_tag=$tag" >"$manifest"
|
||||
|
||||
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
|
||||
kapp deploy --yes --app local-user-authenticator --diff-changes --file "$manifest"
|
||||
|
||||
popd >/dev/null
|
||||
|
||||
#
|
||||
# Deploy Tools
|
||||
#
|
||||
dex_test_password="$(openssl rand -hex 16)"
|
||||
ldap_test_password="$(openssl rand -hex 16)"
|
||||
pushd test/deploy/tools >/dev/null
|
||||
|
||||
log_note "Deploying Tools to the cluster..."
|
||||
ytt --file . \
|
||||
--data-value-yaml "supervisor_redirect_uris=[https://pinniped-supervisor-clusterip.supervisor.svc.cluster.local/some/path/callback]" \
|
||||
--data-value "pinny_ldap_password=$ldap_test_password" \
|
||||
--data-value "pinny_bcrypt_passwd_hash=$(htpasswd -nbBC 10 x "$dex_test_password" | sed -e "s/^x://")" \
|
||||
>"$manifest"
|
||||
|
||||
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
|
||||
kapp deploy --yes --app tools --diff-changes --file "$manifest"
|
||||
|
||||
popd >/dev/null
|
||||
|
||||
test_username="test-username"
|
||||
test_groups="test-group-0,test-group-1"
|
||||
test_password="$(openssl rand -hex 16)"
|
||||
@@ -250,29 +250,27 @@ supervisor_app_name="pinniped-supervisor"
|
||||
supervisor_namespace="supervisor"
|
||||
supervisor_custom_labels="{mySupervisorCustomLabelName: mySupervisorCustomLabelValue}"
|
||||
|
||||
if ! tilt_mode; then
|
||||
pushd deploy/supervisor >/dev/null
|
||||
pushd deploy/supervisor >/dev/null
|
||||
|
||||
log_note "Deploying the Pinniped Supervisor app to the cluster..."
|
||||
ytt --file . \
|
||||
--data-value "app_name=$supervisor_app_name" \
|
||||
--data-value "namespace=$supervisor_namespace" \
|
||||
--data-value "api_group_suffix=$api_group_suffix" \
|
||||
--data-value "image_repo=$registry_repo" \
|
||||
--data-value "image_tag=$tag" \
|
||||
--data-value "log_level=debug" \
|
||||
--data-value-yaml "custom_labels=$supervisor_custom_labels" \
|
||||
--data-value-yaml 'service_http_nodeport_port=80' \
|
||||
--data-value-yaml 'service_http_nodeport_nodeport=31234' \
|
||||
--data-value-yaml 'service_https_nodeport_port=443' \
|
||||
--data-value-yaml 'service_https_nodeport_nodeport=31243' \
|
||||
--data-value-yaml 'service_https_clusterip_port=443' \
|
||||
>"$manifest"
|
||||
log_note "Deploying the Pinniped Supervisor app to the cluster..."
|
||||
ytt --file . \
|
||||
--data-value "app_name=$supervisor_app_name" \
|
||||
--data-value "namespace=$supervisor_namespace" \
|
||||
--data-value "api_group_suffix=$api_group_suffix" \
|
||||
--data-value "image_repo=$registry_repo" \
|
||||
--data-value "image_tag=$tag" \
|
||||
--data-value "log_level=debug" \
|
||||
--data-value-yaml "custom_labels=$supervisor_custom_labels" \
|
||||
--data-value-yaml 'service_http_nodeport_port=80' \
|
||||
--data-value-yaml 'service_http_nodeport_nodeport=31234' \
|
||||
--data-value-yaml 'service_https_nodeport_port=443' \
|
||||
--data-value-yaml 'service_https_nodeport_nodeport=31243' \
|
||||
--data-value-yaml 'service_https_clusterip_port=443' \
|
||||
>"$manifest"
|
||||
|
||||
kapp deploy --yes --app "$supervisor_app_name" --diff-changes --file "$manifest"
|
||||
kapp deploy --yes --app "$supervisor_app_name" --diff-changes --file "$manifest"
|
||||
|
||||
popd >/dev/null
|
||||
fi
|
||||
popd >/dev/null
|
||||
|
||||
#
|
||||
# Deploy the Pinniped Concierge
|
||||
@@ -284,38 +282,42 @@ webhook_ca_bundle="$(kubectl get secret local-user-authenticator-tls-serving-cer
|
||||
discovery_url="$(TERM=dumb kubectl cluster-info | awk '/master|control plane/ {print $NF}')"
|
||||
concierge_custom_labels="{myConciergeCustomLabelName: myConciergeCustomLabelValue}"
|
||||
|
||||
if ! tilt_mode; then
|
||||
pushd deploy/concierge >/dev/null
|
||||
pushd deploy/concierge >/dev/null
|
||||
|
||||
log_note "Deploying the Pinniped Concierge app to the cluster..."
|
||||
ytt --file . \
|
||||
--data-value "app_name=$concierge_app_name" \
|
||||
--data-value "namespace=$concierge_namespace" \
|
||||
--data-value "api_group_suffix=$api_group_suffix" \
|
||||
--data-value "log_level=debug" \
|
||||
--data-value-yaml "custom_labels=$concierge_custom_labels" \
|
||||
--data-value "image_repo=$registry_repo" \
|
||||
--data-value "image_tag=$tag" \
|
||||
--data-value "discovery_url=$discovery_url" >"$manifest"
|
||||
log_note "Deploying the Pinniped Concierge app to the cluster..."
|
||||
ytt --file . \
|
||||
--data-value "app_name=$concierge_app_name" \
|
||||
--data-value "namespace=$concierge_namespace" \
|
||||
--data-value "api_group_suffix=$api_group_suffix" \
|
||||
--data-value "log_level=debug" \
|
||||
--data-value-yaml "custom_labels=$concierge_custom_labels" \
|
||||
--data-value "image_repo=$registry_repo" \
|
||||
--data-value "image_tag=$tag" \
|
||||
--data-value "discovery_url=$discovery_url" >"$manifest"
|
||||
|
||||
kapp deploy --yes --app "$concierge_app_name" --diff-changes --file "$manifest"
|
||||
kapp deploy --yes --app "$concierge_app_name" --diff-changes --file "$manifest"
|
||||
|
||||
popd >/dev/null
|
||||
fi
|
||||
popd >/dev/null
|
||||
|
||||
#
|
||||
# Download the test CA bundle that was generated in the Dex pod.
|
||||
# Note that this returns a base64 encoded value.
|
||||
#
|
||||
test_ca_bundle_pem="$(kubectl get secrets -n dex certs -o go-template='{{index .data "ca.pem" | base64decode}}')"
|
||||
test_ca_bundle_pem="$(kubectl get secrets -n tools certs -o go-template='{{index .data "ca.pem"}}')"
|
||||
|
||||
#
|
||||
# Create the environment file
|
||||
# Create the environment file.
|
||||
#
|
||||
# Note that all values should not contains newlines, except for PINNIPED_TEST_CLUSTER_CAPABILITY_YAML,
|
||||
# so that the environment can also be used in tools like GoLand. Therefore, multi-line values,
|
||||
# such as PEM-formatted certificates, should be base64 encoded.
|
||||
#
|
||||
kind_capabilities_file="$pinniped_path/test/cluster_capabilities/kind.yaml"
|
||||
pinniped_cluster_capability_file_content=$(cat "$kind_capabilities_file")
|
||||
|
||||
cat <<EOF >/tmp/integration-test-env
|
||||
# The following env vars should be set before running 'go test -v -count 1 -timeout 0 ./test/integration'
|
||||
export PINNIPED_TEST_TOOLS_NAMESPACE="tools"
|
||||
export PINNIPED_TEST_CONCIERGE_NAMESPACE=${concierge_namespace}
|
||||
export PINNIPED_TEST_CONCIERGE_APP_NAME=${concierge_app_name}
|
||||
export PINNIPED_TEST_CONCIERGE_CUSTOM_LABELS='${concierge_custom_labels}'
|
||||
@@ -330,13 +332,30 @@ export PINNIPED_TEST_SUPERVISOR_CUSTOM_LABELS='${supervisor_custom_labels}'
|
||||
export PINNIPED_TEST_SUPERVISOR_HTTP_ADDRESS="127.0.0.1:12345"
|
||||
export PINNIPED_TEST_SUPERVISOR_HTTPS_ADDRESS="localhost:12344"
|
||||
export PINNIPED_TEST_PROXY=http://127.0.0.1:12346
|
||||
export PINNIPED_TEST_CLI_OIDC_ISSUER=https://dex.dex.svc.cluster.local/dex
|
||||
export PINNIPED_TEST_LDAP_HOST=ldap.tools.svc.cluster.local
|
||||
export PINNIPED_TEST_LDAP_LDAPS_CA_BUNDLE="${test_ca_bundle_pem}"
|
||||
export PINNIPED_TEST_LDAP_BIND_ACCOUNT_USERNAME="cn=admin,dc=pinniped,dc=dev"
|
||||
export PINNIPED_TEST_LDAP_BIND_ACCOUNT_PASSWORD=password
|
||||
export PINNIPED_TEST_LDAP_USERS_SEARCH_BASE="ou=users,dc=pinniped,dc=dev"
|
||||
export PINNIPED_TEST_LDAP_GROUPS_SEARCH_BASE="ou=groups,dc=pinniped,dc=dev"
|
||||
export PINNIPED_TEST_LDAP_USER_DN="cn=pinny,ou=users,dc=pinniped,dc=dev"
|
||||
export PINNIPED_TEST_LDAP_USER_CN="pinny"
|
||||
export PINNIPED_TEST_LDAP_USER_PASSWORD=${ldap_test_password}
|
||||
export PINNIPED_TEST_LDAP_USER_UNIQUE_ID_ATTRIBUTE_NAME="uidNumber"
|
||||
export PINNIPED_TEST_LDAP_USER_UNIQUE_ID_ATTRIBUTE_VALUE="1000"
|
||||
export PINNIPED_TEST_LDAP_USER_EMAIL_ATTRIBUTE_NAME="mail"
|
||||
export PINNIPED_TEST_LDAP_USER_EMAIL_ATTRIBUTE_VALUE="pinny.ldap@example.com"
|
||||
export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_GROUPS_DN="cn=ball-game-players,ou=beach-groups,ou=groups,dc=pinniped,dc=dev;cn=seals,ou=groups,dc=pinniped,dc=dev"
|
||||
export PINNIPED_TEST_LDAP_EXPECTED_INDIRECT_GROUPS_DN="cn=pinnipeds,ou=groups,dc=pinniped,dc=dev;cn=mammals,ou=groups,dc=pinniped,dc=dev"
|
||||
export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_GROUPS_CN="ball-game-players;seals"
|
||||
export PINNIPED_TEST_LDAP_EXPECTED_INDIRECT_GROUPS_CN="pinnipeds;mammals"
|
||||
export PINNIPED_TEST_CLI_OIDC_ISSUER=https://dex.tools.svc.cluster.local/dex
|
||||
export PINNIPED_TEST_CLI_OIDC_ISSUER_CA_BUNDLE="${test_ca_bundle_pem}"
|
||||
export PINNIPED_TEST_CLI_OIDC_CLIENT_ID=pinniped-cli
|
||||
export PINNIPED_TEST_CLI_OIDC_CALLBACK_URL=http://127.0.0.1:48095/callback
|
||||
export PINNIPED_TEST_CLI_OIDC_USERNAME=pinny@example.com
|
||||
export PINNIPED_TEST_CLI_OIDC_PASSWORD=${dex_test_password}
|
||||
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER=https://dex.dex.svc.cluster.local/dex
|
||||
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER=https://dex.tools.svc.cluster.local/dex
|
||||
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER_CA_BUNDLE="${test_ca_bundle_pem}"
|
||||
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ADDITIONAL_SCOPES=email
|
||||
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_USERNAME_CLAIM=email
|
||||
@@ -357,23 +376,18 @@ export PINNIPED_TEST_CLUSTER_CAPABILITY_YAML
|
||||
EOF
|
||||
|
||||
#
|
||||
# Print instructions for next steps
|
||||
# Print instructions for next steps.
|
||||
#
|
||||
goland_vars=$(grep -v '^#' /tmp/integration-test-env | grep -E '^export .+=' | sed 's/export //g' | tr '\n' ';')
|
||||
|
||||
log_note
|
||||
log_note "🚀 Ready to run integration tests! For example..."
|
||||
log_note " cd $pinniped_path"
|
||||
log_note ' source /tmp/integration-test-env && go test -v -race -count 1 -timeout 0 ./test/integration'
|
||||
log_note
|
||||
log_note 'Want to run integration tests in GoLand? Copy/paste this "Environment" value for GoLand run configurations:'
|
||||
log_note " ${goland_vars}PINNIPED_TEST_CLUSTER_CAPABILITY_FILE=${kind_capabilities_file}"
|
||||
log_note "Using GoLand? Paste the result of this command into GoLand's run configuration \"Environment\"."
|
||||
log_note " hack/integration-test-env-goland.sh | pbcopy"
|
||||
log_note
|
||||
|
||||
if ! tilt_mode; then
|
||||
log_note "You can rerun this script to redeploy local production code changes while you are working."
|
||||
log_note
|
||||
log_note "To delete the deployments, run:"
|
||||
log_note " kapp delete -a local-user-authenticator -y && kapp delete -a $concierge_app_name -y && kapp delete -a $supervisor_app_name -y"
|
||||
log_note "When you're finished, use './hack/kind-down.sh' to tear down the cluster."
|
||||
fi
|
||||
log_note "You can rerun this script to redeploy local production code changes while you are working."
|
||||
log_note
|
||||
log_note "To delete the deployments, run:"
|
||||
log_note " kapp delete -a local-user-authenticator -y && kapp delete -a $concierge_app_name -y && kapp delete -a $supervisor_app_name -y"
|
||||
log_note "When you're finished, use './hack/kind-down.sh' to tear down the cluster."
|
||||
|
||||
@@ -82,7 +82,7 @@ metadata:
|
||||
spec:
|
||||
issuer: "$PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER"
|
||||
tls:
|
||||
certificateAuthorityData: "$(echo "$PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER_CA_BUNDLE" | base64)"
|
||||
certificateAuthorityData: "$PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER_CA_BUNDLE"
|
||||
authorizationConfig:
|
||||
additionalScopes: [ ${PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ADDITIONAL_SCOPES} ]
|
||||
claims:
|
||||
|
||||
hack/prepare-webhook-on-kind.sh (new executable file, 37 lines)
@@ -0,0 +1,37 @@
#!/usr/bin/env bash

# Copyright 2021 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

#
# This script deploys a WebhookAuthenticator to use for manual testing. It
# assumes that you have run hack/prepare-for-integration-tests.sh while pointed
# at the current cluster.
#

set -euo pipefail

# Change working directory to the top of the repo.
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$ROOT"

# Read the env vars output by hack/prepare-for-integration-tests.sh.
source /tmp/integration-test-env

# Create WebhookAuthenticator.
cat <<EOF | kubectl apply -f - 1>&2
kind: WebhookAuthenticator
apiVersion: authentication.concierge.pinniped.dev/v1alpha1
metadata:
  name: my-webhook
spec:
  endpoint: ${PINNIPED_TEST_WEBHOOK_ENDPOINT}
  tls:
    certificateAuthorityData: ${PINNIPED_TEST_WEBHOOK_CA_BUNDLE}
EOF

# Use the CLI to get a kubeconfig that will use this WebhookAuthenticator.
go build -o /tmp/pinniped ./cmd/pinniped
/tmp/pinniped get kubeconfig --static-token "$PINNIPED_TEST_USER_TOKEN" >/tmp/kubeconfig-with-webhook-auth.yaml

echo "export KUBECONFIG=/tmp/kubeconfig-with-webhook-auth.yaml"
@@ -1,9 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
|
||||
cd "${ROOT}"
|
||||
exec tilt down -f ./hack/lib/tilt/Tiltfile
|
||||
@@ -1,11 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
|
||||
|
||||
cd "${ROOT}"
|
||||
|
||||
exec tilt up -f ./hack/lib/tilt/Tiltfile "$@"
|
||||
internal/concierge/impersonator/doc.go (new file, 49 lines)
@@ -0,0 +1,49 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

/*
Package impersonator implements an HTTP server that reverse proxies all requests
to the Kubernetes API server with impersonation headers set to match the calling
user. Since impersonation cannot be disabled, this allows us to dynamically
configure authentication on any cluster, even the cloud hosted ones.

The specifics of how it is implemented are of interest. The most novel detail
about the implementation is that we use the "front-end" of the aggregated API
server logic, mainly the DefaultBuildHandlerChain func, to handle how incoming
requests are authenticated, authorized, etc. The "back-end" of the proxy is a
reverse proxy that impersonates the user (instead of serving REST APIs).

In terms of authentication, we aim to handle every type of authentication that
the Kubernetes API server supports by delegating most of the checks to it. We
also honor client certs from a CA that is specific to the impersonation proxy.
This approach allows clients to use the Token Credential Request API even when
we do not have the cluster's signing key.

In terms of authorization, we rely mostly on the Kubernetes API server. Since we
impersonate the user, the proxied request will be authorized against that user.
Thus for all regular REST verbs, we perform no authorization checks.

Nested impersonation is handled by performing the same authorization checks the
Kubernetes API server would (we get this mostly for free by using the aggregated
API server code). We preserve the original user in the reserved extra key
original-user-info.impersonation-proxy.concierge.pinniped.dev as a JSON blob of
the authenticationv1.UserInfo struct. This is necessary to make sure that the
Kubernetes audit log contains all three identities (original user, impersonated
user and the impersonation proxy's service account). Capturing the original
user information requires that we enable the auditing stack (WithImpersonation
only shares this information with the audit stack). To keep things simple,
we use the fake audit backend at the Metadata level for all requests. This
guarantees that we always have an audit event on every request.

One final wrinkle is that impersonation cannot impersonate UIDs (yet). This is
problematic because service account tokens always assert a UID. To handle this
case without losing authentication information, when we see an identity with a
UID that was asserted via a bearer token, we simply pass the request through
with the original bearer token and no impersonation headers set (as if the user
had made the request directly against the Kubernetes API server).

For all normal requests, we only use http/2.0 when proxying to the API server.
For upgrade requests, we only use http/1.1 since these always go from http/1.1
to either websockets or SPDY.
*/
package impersonator
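To make the "reverse proxy that adds impersonation headers" idea above concrete, here is a minimal, hedged sketch in Go. It is not the Pinniped implementation (which reuses the aggregated API server's DefaultBuildHandlerChain for authn, authz, and audit); the function name newSketchProxy, the hard-coded user info, and the listen address are illustrative assumptions. It relies only on transport.NewImpersonatingRoundTripper from client-go, which the real proxy also uses.

```go
// Minimal sketch of the "reverse proxy that impersonates the caller" idea.
// NOT the actual Pinniped implementation; user info and addresses are
// hypothetical placeholders.
package main

import (
	"net/http"
	"net/http/httputil"
	"net/url"

	"k8s.io/client-go/transport"
)

// newSketchProxy proxies every request to the API server at apiServerURL,
// adding Impersonate-User/Impersonate-Group headers for a fixed user.
func newSketchProxy(apiServerURL string, base http.RoundTripper) (http.Handler, error) {
	target, err := url.Parse(apiServerURL)
	if err != nil {
		return nil, err
	}
	proxy := httputil.NewSingleHostReverseProxy(target)
	// NewImpersonatingRoundTripper clones each request before setting the
	// impersonation headers, so the incoming request is never mutated.
	proxy.Transport = transport.NewImpersonatingRoundTripper(transport.ImpersonationConfig{
		UserName: "jane",                 // in the real proxy: the authenticated caller
		Groups:   []string{"developers"}, // likewise
	}, base)
	return proxy, nil
}

func main() {
	handler, err := newSketchProxy("https://kubernetes.default.svc", http.DefaultTransport)
	if err != nil {
		panic(err)
	}
	// The real proxy wraps this handler with the Kube handler chain (authn,
	// authz, audit) rather than serving it directly like this sketch does.
	_ = http.ListenAndServe("127.0.0.1:8444", handler)
}
```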
@@ -4,14 +4,19 @@
|
||||
package impersonator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
authenticationv1 "k8s.io/api/authentication/v1"
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -21,6 +26,10 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/httpstream"
|
||||
utilnet "k8s.io/apimachinery/pkg/util/net"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
auditinternal "k8s.io/apiserver/pkg/apis/audit"
|
||||
"k8s.io/apiserver/pkg/audit/policy"
|
||||
"k8s.io/apiserver/pkg/authentication/authenticator"
|
||||
"k8s.io/apiserver/pkg/authentication/request/bearertoken"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
"k8s.io/apiserver/pkg/endpoints/filterlatency"
|
||||
@@ -31,6 +40,7 @@ import (
|
||||
"k8s.io/apiserver/pkg/server/dynamiccertificates"
|
||||
"k8s.io/apiserver/pkg/server/filters"
|
||||
genericoptions "k8s.io/apiserver/pkg/server/options"
|
||||
auditfake "k8s.io/apiserver/plugin/pkg/audit/fake"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/transport"
|
||||
|
||||
@@ -39,6 +49,7 @@ import (
|
||||
"go.pinniped.dev/internal/httputil/securityheader"
|
||||
"go.pinniped.dev/internal/kubeclient"
|
||||
"go.pinniped.dev/internal/plog"
|
||||
"go.pinniped.dev/internal/valuelesscontext"
|
||||
)
|
||||
|
||||
// FactoryFunc is a function which can create an impersonator server.
|
||||
@@ -100,7 +111,6 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
recommendedOptions.Authentication.ClientCert.ClientCA = "---irrelevant-but-needs-to-be-non-empty---" // drop when we pick up https://github.com/kubernetes/kubernetes/pull/100055
|
||||
recommendedOptions.Authentication.ClientCert.CAContentProvider = dynamiccertificates.NewUnionCAContentProvider(
|
||||
impersonationProxySignerCA, kubeClientCA,
|
||||
)
|
||||
@@ -163,35 +173,63 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
|
||||
}))
|
||||
handler = filterlatency.TrackStarted(handler, "impersonationproxy")
|
||||
|
||||
handler = filterlatency.TrackCompleted(handler)
|
||||
handler = deleteKnownImpersonationHeaders(handler)
|
||||
handler = filterlatency.TrackStarted(handler, "deleteimpersonationheaders")
|
||||
|
||||
// The standard Kube handler chain (authn, authz, impersonation, audit, etc).
|
||||
// See the genericapiserver.DefaultBuildHandlerChain func for details.
|
||||
handler = defaultBuildHandlerChainFunc(handler, c)
|
||||
|
||||
// we need to grab the bearer token before WithAuthentication deletes it.
|
||||
handler = filterlatency.TrackCompleted(handler)
|
||||
handler = withBearerTokenPreservation(handler)
|
||||
handler = filterlatency.TrackStarted(handler, "bearertokenpreservation")
|
||||
|
||||
// Always set security headers so browsers do the right thing.
|
||||
handler = filterlatency.TrackCompleted(handler)
|
||||
handler = securityheader.Wrap(handler)
|
||||
handler = filterlatency.TrackStarted(handler, "securityheaders")
|
||||
|
||||
return handler
|
||||
}
|
||||
|
||||
// Overwrite the delegating authorizer with one that only cares about impersonation.
|
||||
// Empty string is disallowed because request info has had bugs in the past where it would leave it empty.
|
||||
disallowedVerbs := sets.NewString("", "impersonate")
|
||||
noImpersonationAuthorizer := &comparableAuthorizer{
|
||||
AuthorizerFunc: func(a authorizer.Attributes) (authorizer.Decision, string, error) {
|
||||
// Supporting impersonation is not hard, it would just require a bunch of testing
|
||||
// and configuring the audit layer (to preserve the caller) which we can do later.
|
||||
// We would also want to delete the incoming impersonation headers
|
||||
// instead of overwriting the delegating authorizer, we would
|
||||
// actually use it to make the impersonation authorization checks.
|
||||
if disallowedVerbs.Has(a.GetVerb()) {
|
||||
return authorizer.DecisionDeny, "impersonation is not allowed or invalid verb", nil
|
||||
}
|
||||
// wire up a fake audit backend at the metadata level so we can preserve the original user during nested impersonation
|
||||
// TODO: wire up the real std out logging audit backend based on plog log level
|
||||
serverConfig.AuditPolicyChecker = policy.FakeChecker(auditinternal.LevelMetadata, nil)
|
||||
serverConfig.AuditBackend = &auditfake.Backend{}
|
||||
|
||||
return authorizer.DecisionAllow, "deferring authorization to kube API server", nil
|
||||
// if we ever start unioning a TCR bearer token authenticator with serverConfig.Authenticator
|
||||
// then we will need to update the related assumption in tokenPassthroughRoundTripper
|
||||
|
||||
delegatingAuthorizer := serverConfig.Authorization.Authorizer
|
||||
nestedImpersonationAuthorizer := &comparableAuthorizer{
|
||||
authorizerFunc: func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
|
||||
switch a.GetVerb() {
|
||||
case "":
|
||||
// Empty string is disallowed because request info has had bugs in the past where it would leave it empty.
|
||||
return authorizer.DecisionDeny, "invalid verb", nil
|
||||
case "create",
|
||||
"update",
|
||||
"delete",
|
||||
"deletecollection",
|
||||
"get",
|
||||
"list",
|
||||
"watch",
|
||||
"patch",
|
||||
"proxy":
|
||||
// we know these verbs are from the request info parsing which is safe to delegate to KAS
|
||||
return authorizer.DecisionAllow, "deferring standard verb authorization to kube API server", nil
|
||||
default:
|
||||
// assume everything else is internal SAR checks that we need to run against the requesting user
|
||||
// because when KAS does the check, it may run the check against our service account and not the
|
||||
// requesting user. This also handles the impersonate verb to allow for nested impersonation.
|
||||
return delegatingAuthorizer.Authorize(ctx, a)
|
||||
}
|
||||
},
|
||||
}
|
||||
// Set our custom authorizer before calling Compete(), which will use it.
|
||||
serverConfig.Authorization.Authorizer = noImpersonationAuthorizer
|
||||
serverConfig.Authorization.Authorizer = nestedImpersonationAuthorizer
|
||||
|
||||
impersonationProxyServer, err := serverConfig.Complete().New("impersonation-proxy", genericapiserver.NewEmptyDelegate())
|
||||
if err != nil {
|
||||
@@ -201,7 +239,7 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
|
||||
preparedRun := impersonationProxyServer.PrepareRun()
|
||||
|
||||
// Sanity check. Make sure that our custom authorizer is still in place and did not get changed or wrapped.
|
||||
if preparedRun.Authorizer != noImpersonationAuthorizer {
|
||||
if preparedRun.Authorizer != nestedImpersonationAuthorizer {
|
||||
return nil, constable.Error("invalid mutation of impersonation authorizer detected")
|
||||
}
|
||||
|
||||
@@ -225,11 +263,75 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func deleteKnownImpersonationHeaders(delegate http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// remove known impersonation headers while avoiding mutation of input request
|
||||
// unknown future impersonation headers will still get caught by our later checks
|
||||
if ensureNoImpersonationHeaders(r) != nil {
|
||||
r = r.Clone(r.Context())
|
||||
|
||||
impersonationHeaders := []string{
|
||||
transport.ImpersonateUserHeader,
|
||||
transport.ImpersonateGroupHeader,
|
||||
}
|
||||
|
||||
for k := range r.Header {
|
||||
if !strings.HasPrefix(k, transport.ImpersonateUserExtraHeaderPrefix) {
|
||||
continue
|
||||
}
|
||||
impersonationHeaders = append(impersonationHeaders, k)
|
||||
}
|
||||
|
||||
for _, header := range impersonationHeaders {
|
||||
r.Header.Del(header) // delay mutation until the end when we are done iterating over the map
|
||||
}
|
||||
}
|
||||
|
||||
delegate.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// No-op wrapping around AuthorizerFunc to allow for comparisons.
|
||||
type comparableAuthorizer struct {
|
||||
authorizer.AuthorizerFunc
|
||||
authorizerFunc
|
||||
}
|
||||
|
||||
// TODO: delete when we pick up https://github.com/kubernetes/kubernetes/pull/100963
|
||||
type authorizerFunc func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error)
|
||||
|
||||
func (f authorizerFunc) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
|
||||
return f(ctx, a)
|
||||
}
|
||||
|
||||
func withBearerTokenPreservation(delegate http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// this looks a bit hacky but lets us avoid writing any logic for parsing out the bearer token
|
||||
var reqToken string
|
||||
_, _, _ = bearertoken.New(authenticator.TokenFunc(func(_ context.Context, token string) (*authenticator.Response, bool, error) {
|
||||
reqToken = token
|
||||
return nil, false, nil
|
||||
})).AuthenticateRequest(r)
|
||||
|
||||
// smuggle the token through the context. this does mean that we need to avoid logging the context.
|
||||
if len(reqToken) != 0 {
|
||||
ctx := context.WithValue(r.Context(), tokenKey, reqToken)
|
||||
r = r.WithContext(ctx)
|
||||
}
|
||||
|
||||
delegate.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
func tokenFrom(ctx context.Context) string {
|
||||
token, _ := ctx.Value(tokenKey).(string)
|
||||
return token
|
||||
}
|
||||
|
||||
// contextKey type is unexported to prevent collisions.
|
||||
type contextKey int
|
||||
|
||||
const tokenKey contextKey = iota
|
||||
|
||||
func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapiserver.Config) http.Handler, error) {
|
||||
serverURL, err := url.Parse(restConfig.Host)
|
||||
if err != nil {
|
||||
@@ -240,11 +342,19 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get http/1.1 round tripper: %w", err)
|
||||
}
|
||||
http1RoundTripperAnonymous, err := getTransportForProtocol(rest.AnonymousClientConfig(restConfig), "http/1.1")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get http/1.1 anonymous round tripper: %w", err)
|
||||
}
|
||||
|
||||
http2RoundTripper, err := getTransportForProtocol(restConfig, "h2")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get http/2.0 round tripper: %w", err)
|
||||
}
|
||||
http2RoundTripperAnonymous, err := getTransportForProtocol(rest.AnonymousClientConfig(restConfig), "h2")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get http/2.0 anonymous round tripper: %w", err)
|
||||
}
|
||||
|
||||
return func(c *genericapiserver.Config) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -258,7 +368,7 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi
|
||||
}
|
||||
|
||||
if err := ensureNoImpersonationHeaders(r); err != nil {
|
||||
plog.Error("noImpersonationAuthorizer logic did not prevent nested impersonation but it is always supposed to do so",
|
||||
plog.Error("unknown impersonation header seen",
|
||||
err,
|
||||
"url", r.URL.String(),
|
||||
"method", r.Method,
|
||||
@@ -277,15 +387,28 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi
|
||||
return
|
||||
}
|
||||
|
||||
// KAS only supports upgrades via http/1.1 to websockets/SPDY (upgrades never use http/2.0)
|
||||
// Thus we default to using http/2.0 when the request is not an upgrade, otherwise we use http/1.1
|
||||
baseRT := http2RoundTripper
|
||||
isUpgradeRequest := httpstream.IsUpgradeRequest(r)
|
||||
if isUpgradeRequest {
|
||||
baseRT = http1RoundTripper
|
||||
ae := request.AuditEventFrom(r.Context())
|
||||
if ae == nil {
|
||||
plog.Warning("aggregated API server logic did not set audit event but it is always supposed to do so",
|
||||
"url", r.URL.String(),
|
||||
"method", r.Method,
|
||||
)
|
||||
newInternalErrResponse(w, r, c.Serializer, "invalid audit event")
|
||||
return
|
||||
}
|
||||
|
||||
rt, err := getTransportForUser(userInfo, baseRT)
|
||||
// grab the request's bearer token if present. this is optional and does not fail the request if missing.
|
||||
token := tokenFrom(r.Context())
|
||||
|
||||
// KAS only supports upgrades via http/1.1 to websockets/SPDY (upgrades never use http/2.0)
|
||||
// Thus we default to using http/2.0 when the request is not an upgrade, otherwise we use http/1.1
|
||||
baseRT, baseRTAnonymous := http2RoundTripper, http2RoundTripperAnonymous
|
||||
isUpgradeRequest := httpstream.IsUpgradeRequest(r)
|
||||
if isUpgradeRequest {
|
||||
baseRT, baseRTAnonymous = http1RoundTripper, http1RoundTripperAnonymous
|
||||
}
|
||||
|
||||
rt, err := getTransportForUser(r.Context(), userInfo, baseRT, baseRTAnonymous, ae, token, c.Authentication.Authenticator)
|
||||
if err != nil {
|
||||
plog.WarningErr("rejecting request as we cannot act as the current user", err,
|
||||
"url", r.URL.String(),
|
||||
@@ -332,6 +455,9 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi
|
||||
|
||||
func ensureNoImpersonationHeaders(r *http.Request) error {
|
||||
for key := range r.Header {
|
||||
// even though we have unit tests that try to cover this case, it is hard to tell if Go does
|
||||
// client side canonicalization on encode, server side canonicalization on decode, or both
|
||||
key := http.CanonicalHeaderKey(key)
|
||||
if strings.HasPrefix(key, "Impersonate") {
|
||||
return fmt.Errorf("%q header already exists", key)
|
||||
}
|
||||
@@ -340,31 +466,157 @@ func ensureNoImpersonationHeaders(r *http.Request) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func getTransportForUser(userInfo user.Info, delegate http.RoundTripper) (http.RoundTripper, error) {
|
||||
if len(userInfo.GetUID()) == 0 {
|
||||
impersonateConfig := transport.ImpersonationConfig{
|
||||
UserName: userInfo.GetName(),
|
||||
Groups: userInfo.GetGroups(),
|
||||
Extra: userInfo.GetExtra(),
|
||||
}
|
||||
// transport.NewImpersonatingRoundTripper clones the request before setting headers
|
||||
// thus it will not accidentally mutate the input request (see http.Handler docs)
|
||||
return transport.NewImpersonatingRoundTripper(impersonateConfig, delegate), nil
|
||||
func getTransportForUser(ctx context.Context, userInfo user.Info, delegate, delegateAnonymous http.RoundTripper, ae *auditinternal.Event, token string, authenticator authenticator.Request) (http.RoundTripper, error) {
|
||||
if canImpersonateFully(userInfo) {
|
||||
return standardImpersonationRoundTripper(userInfo, ae, delegate)
|
||||
}
|
||||
|
||||
// 0. in the case of a request that is not attempting to do nested impersonation
|
||||
// 1. if we make the assumption that the TCR API does not issue tokens (or pass the TCR API bearer token
|
||||
// authenticator into this func - we need to know the authentication cred is something KAS would honor)
|
||||
// 2. then if preserve the incoming authorization header into the request's context
|
||||
// 3. we could reauthenticate it here (it would be a free cache hit)
|
||||
// 4. confirm that it matches the passed in user info (i.e. it was actually the cred used to authenticate and not a client cert)
|
||||
// 5. then we could issue a reverse proxy request using an anonymous rest config and the bearer token
|
||||
// 6. thus instead of impersonating the user, we would just be passing their request through
|
||||
// 7. this would preserve the UID info and thus allow us to safely support all token based auth
|
||||
// 8. the above would be safe even if in the future Kube started supporting UIDs asserted by client certs
|
||||
return nil, constable.Error("unexpected uid")
|
||||
return tokenPassthroughRoundTripper(ctx, delegateAnonymous, ae, token, authenticator)
|
||||
}
|
||||
|
||||
func canImpersonateFully(userInfo user.Info) bool {
|
||||
// nolint: gosimple // this structure is on purpose because we plan to expand this function
|
||||
if len(userInfo.GetUID()) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
// once kube supports UID impersonation, add logic to detect if the KAS is
|
||||
// new enough to have this functionality and return true in that case as well
|
||||
return false
|
||||
}
|
||||
|
||||
func standardImpersonationRoundTripper(userInfo user.Info, ae *auditinternal.Event, delegate http.RoundTripper) (http.RoundTripper, error) {
|
||||
extra, err := buildExtra(userInfo.GetExtra(), ae)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
impersonateConfig := transport.ImpersonationConfig{
|
||||
UserName: userInfo.GetName(),
|
||||
Groups: userInfo.GetGroups(),
|
||||
Extra: extra,
|
||||
}
|
||||
// transport.NewImpersonatingRoundTripper clones the request before setting headers
|
||||
// thus it will not accidentally mutate the input request (see http.Handler docs)
|
||||
return transport.NewImpersonatingRoundTripper(impersonateConfig, delegate), nil
|
||||
}
|
||||
|
||||
func tokenPassthroughRoundTripper(ctx context.Context, delegateAnonymous http.RoundTripper, ae *auditinternal.Event, token string, authenticator authenticator.Request) (http.RoundTripper, error) {
|
||||
// all code below assumes KAS does not support UID impersonation because that case is handled in the standard path
|
||||
|
||||
// it also assumes that the TCR API does not issue tokens - if this assumption changes, we will need
|
||||
// some way to distinguish a token that is only valid against this impersonation proxy and not against KAS.
|
||||
// this code will fail closed because said TCR token would not work against KAS and the request would fail.
|
||||
|
||||
// if we get here we know the final user info had a UID
|
||||
// if the original user is also performing a nested impersonation, it means that said nested
|
||||
// impersonation is trying to impersonate a UID since final user info == ae.ImpersonatedUser
|
||||
// we know this KAS does not support UID impersonation so this request must be rejected
|
||||
if ae.ImpersonatedUser != nil {
|
||||
return nil, constable.Error("unable to impersonate uid")
|
||||
}
|
||||
|
||||
// see what KAS thinks this token translates into
|
||||
// this is important because certs have precedence over tokens and we want
|
||||
// to make sure that we do not get confused and pass along the wrong token
|
||||
tokenUser, err := tokenReview(ctx, token, authenticator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// we want to compare the result of the token authentication with the original user that made the request
|
||||
// if the user who made the request and the token do not match, we cannot go any further at this point
|
||||
if !apiequality.Semantic.DeepEqual(ae.User, tokenUser) {
|
||||
// this info leak seems fine for trace level logs
|
||||
plog.Trace("failed to passthrough token due to user mismatch",
|
||||
"original-username", ae.User.Username,
|
||||
"original-uid", ae.User.UID,
|
||||
"token-username", tokenUser.Username,
|
||||
"token-uid", tokenUser.UID,
|
||||
)
|
||||
return nil, constable.Error("token authenticated as a different user")
|
||||
}
|
||||
|
||||
// now we know that if we send this token to KAS, it will authenticate correctly
|
||||
return transport.NewBearerAuthRoundTripper(token, delegateAnonymous), nil
|
||||
}
|
||||
|
||||
func tokenReview(ctx context.Context, token string, authenticator authenticator.Request) (authenticationv1.UserInfo, error) {
|
||||
if len(token) == 0 {
|
||||
return authenticationv1.UserInfo{}, constable.Error("no token on request")
|
||||
}
|
||||
|
||||
// create a header that contains nothing but the token
|
||||
// an astute observer may ask "but what about the token's audience?"
|
||||
// in this case, we want to leave audiences unset per the token review docs:
|
||||
// > If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.
|
||||
// i.e. we want to make sure that the given token is valid against KAS
|
||||
fakeReq := &http.Request{Header: http.Header{}}
|
||||
fakeReq.Header.Set("Authorization", "Bearer "+token)
|
||||
|
||||
// propagate cancellation of parent context (without any values such as audience)
|
||||
fakeReq = fakeReq.WithContext(valuelesscontext.New(ctx))
|
||||
|
||||
// this will almost always be a free call that hits our 10 second cache TTL
|
||||
resp, ok, err := authenticator.AuthenticateRequest(fakeReq)
|
||||
if err != nil {
|
||||
return authenticationv1.UserInfo{}, err
|
||||
}
|
||||
if !ok {
|
||||
return authenticationv1.UserInfo{}, constable.Error("token failed to authenticate")
|
||||
}
|
||||
|
||||
tokenUser := authenticationv1.UserInfo{
|
||||
Username: resp.User.GetName(),
|
||||
UID: resp.User.GetUID(),
|
||||
Groups: resp.User.GetGroups(),
|
||||
Extra: make(map[string]authenticationv1.ExtraValue, len(resp.User.GetExtra())),
|
||||
}
|
||||
for k, v := range resp.User.GetExtra() {
|
||||
tokenUser.Extra[k] = v
|
||||
}
|
||||
|
||||
return tokenUser, nil
|
||||
}
|
||||
|
||||
func buildExtra(extra map[string][]string, ae *auditinternal.Event) (map[string][]string, error) {
|
||||
const reservedImpersonationProxySuffix = ".impersonation-proxy.concierge.pinniped.dev"
|
||||
|
||||
// always validate that the extra is something we support regardless of nested impersonation
|
||||
for k := range extra {
|
||||
if !extraKeyRegexp.MatchString(k) {
|
||||
return nil, fmt.Errorf("disallowed extra key seen: %s", k)
|
||||
}
|
||||
|
||||
if strings.HasSuffix(k, reservedImpersonationProxySuffix) {
|
||||
return nil, fmt.Errorf("disallowed extra key with reserved prefix seen: %s", k)
|
||||
}
|
||||
}
|
||||
|
||||
if ae.ImpersonatedUser == nil {
|
||||
return extra, nil // just return the given extra since nested impersonation is not being used
|
||||
}
|
||||
|
||||
// avoid mutating input map, preallocate new map to store original user info
|
||||
out := make(map[string][]string, len(extra)+1)
|
||||
|
||||
for k, v := range extra {
|
||||
out[k] = v // shallow copy of slice since we are not going to mutate it
|
||||
}
|
||||
|
||||
origUserInfoJSON, err := json.Marshal(ae.User)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out["original-user-info"+reservedImpersonationProxySuffix] = []string{string(origUserInfoJSON)}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
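As a side note on the reserved extra key used above: the following hedged, standalone sketch (not part of this diff; the user values are made up) shows roughly what the original-user-info entry built by buildExtra ends up containing once the caller's UserInfo is JSON-encoded.

```go
// Standalone illustration of the nested-impersonation extra entry built by
// buildExtra above. The user here is a made-up example.
package main

import (
	"encoding/json"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
)

func main() {
	original := authenticationv1.UserInfo{
		Username: "jane",                 // the caller performing nested impersonation
		Groups:   []string{"developers"}, // UID left empty: impersonation cannot carry one today
	}
	blob, err := json.Marshal(original)
	if err != nil {
		panic(err)
	}
	extra := map[string][]string{
		"original-user-info.impersonation-proxy.concierge.pinniped.dev": {string(blob)},
	}
	fmt.Println(extra["original-user-info.impersonation-proxy.concierge.pinniped.dev"][0])
	// Output (roughly): {"username":"jane","groups":["developers"]}
}
```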
// extraKeyRegexp is a very conservative regex to handle impersonation's extra key fidelity limitations such as casing and escaping.
|
||||
var extraKeyRegexp = regexp.MustCompile(`^[a-z0-9/\-._]+$`)
|
||||
|
||||
func newInternalErrResponse(w http.ResponseWriter, r *http.Request, s runtime.NegotiatedSerializer, msg string) {
|
||||
newStatusErrResponse(w, r, s, apierrors.NewInternalError(constable.Error(msg)))
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -90,7 +90,6 @@ func TestNew(t *testing.T) {
|
||||
|
||||
regularLoginGV.WithKind("CreateOptions"): reflect.TypeOf(&metav1.CreateOptions{}).Elem(),
|
||||
regularLoginGV.WithKind("DeleteOptions"): reflect.TypeOf(&metav1.DeleteOptions{}).Elem(),
|
||||
regularLoginGV.WithKind("ExportOptions"): reflect.TypeOf(&metav1.ExportOptions{}).Elem(),
|
||||
regularLoginGV.WithKind("GetOptions"): reflect.TypeOf(&metav1.GetOptions{}).Elem(),
|
||||
regularLoginGV.WithKind("ListOptions"): reflect.TypeOf(&metav1.ListOptions{}).Elem(),
|
||||
regularLoginGV.WithKind("PatchOptions"): reflect.TypeOf(&metav1.PatchOptions{}).Elem(),
|
||||
@@ -99,7 +98,6 @@ func TestNew(t *testing.T) {
|
||||
|
||||
regularIdentityGV.WithKind("CreateOptions"): reflect.TypeOf(&metav1.CreateOptions{}).Elem(),
|
||||
regularIdentityGV.WithKind("DeleteOptions"): reflect.TypeOf(&metav1.DeleteOptions{}).Elem(),
|
||||
regularIdentityGV.WithKind("ExportOptions"): reflect.TypeOf(&metav1.ExportOptions{}).Elem(),
|
||||
regularIdentityGV.WithKind("GetOptions"): reflect.TypeOf(&metav1.GetOptions{}).Elem(),
|
||||
regularIdentityGV.WithKind("ListOptions"): reflect.TypeOf(&metav1.ListOptions{}).Elem(),
|
||||
regularIdentityGV.WithKind("PatchOptions"): reflect.TypeOf(&metav1.PatchOptions{}).Elem(),
|
||||
@@ -120,7 +118,6 @@ func TestNew(t *testing.T) {
|
||||
metav1.Unversioned.WithKind("APIVersions"): reflect.TypeOf(&metav1.APIVersions{}).Elem(),
|
||||
metav1.Unversioned.WithKind("CreateOptions"): reflect.TypeOf(&metav1.CreateOptions{}).Elem(),
|
||||
metav1.Unversioned.WithKind("DeleteOptions"): reflect.TypeOf(&metav1.DeleteOptions{}).Elem(),
|
||||
metav1.Unversioned.WithKind("ExportOptions"): reflect.TypeOf(&metav1.ExportOptions{}).Elem(),
|
||||
metav1.Unversioned.WithKind("GetOptions"): reflect.TypeOf(&metav1.GetOptions{}).Elem(),
|
||||
metav1.Unversioned.WithKind("ListOptions"): reflect.TypeOf(&metav1.ListOptions{}).Elem(),
|
||||
metav1.Unversioned.WithKind("PatchOptions"): reflect.TypeOf(&metav1.PatchOptions{}).Elem(),
|
||||
@@ -151,7 +148,6 @@ func TestNew(t *testing.T) {
|
||||
|
||||
otherLoginGV.WithKind("CreateOptions"): reflect.TypeOf(&metav1.CreateOptions{}).Elem(),
|
||||
otherLoginGV.WithKind("DeleteOptions"): reflect.TypeOf(&metav1.DeleteOptions{}).Elem(),
|
||||
otherLoginGV.WithKind("ExportOptions"): reflect.TypeOf(&metav1.ExportOptions{}).Elem(),
|
||||
otherLoginGV.WithKind("GetOptions"): reflect.TypeOf(&metav1.GetOptions{}).Elem(),
|
||||
otherLoginGV.WithKind("ListOptions"): reflect.TypeOf(&metav1.ListOptions{}).Elem(),
|
||||
otherLoginGV.WithKind("PatchOptions"): reflect.TypeOf(&metav1.PatchOptions{}).Elem(),
|
||||
@@ -160,7 +156,6 @@ func TestNew(t *testing.T) {
|
||||
|
||||
otherIdentityGV.WithKind("CreateOptions"): reflect.TypeOf(&metav1.CreateOptions{}).Elem(),
|
||||
otherIdentityGV.WithKind("DeleteOptions"): reflect.TypeOf(&metav1.DeleteOptions{}).Elem(),
|
||||
otherIdentityGV.WithKind("ExportOptions"): reflect.TypeOf(&metav1.ExportOptions{}).Elem(),
|
||||
otherIdentityGV.WithKind("GetOptions"): reflect.TypeOf(&metav1.GetOptions{}).Elem(),
|
||||
otherIdentityGV.WithKind("ListOptions"): reflect.TypeOf(&metav1.ListOptions{}).Elem(),
|
||||
otherIdentityGV.WithKind("PatchOptions"): reflect.TypeOf(&metav1.PatchOptions{}).Elem(),
|
||||
@@ -181,7 +176,6 @@ func TestNew(t *testing.T) {
|
||||
metav1.Unversioned.WithKind("APIVersions"): reflect.TypeOf(&metav1.APIVersions{}).Elem(),
|
||||
metav1.Unversioned.WithKind("CreateOptions"): reflect.TypeOf(&metav1.CreateOptions{}).Elem(),
|
||||
metav1.Unversioned.WithKind("DeleteOptions"): reflect.TypeOf(&metav1.DeleteOptions{}).Elem(),
|
||||
metav1.Unversioned.WithKind("ExportOptions"): reflect.TypeOf(&metav1.ExportOptions{}).Elem(),
|
||||
metav1.Unversioned.WithKind("GetOptions"): reflect.TypeOf(&metav1.GetOptions{}).Elem(),
|
||||
metav1.Unversioned.WithKind("ListOptions"): reflect.TypeOf(&metav1.ListOptions{}).Elem(),
|
||||
metav1.Unversioned.WithKind("PatchOptions"): reflect.TypeOf(&metav1.PatchOptions{}).Elem(),
|
||||
|
||||
@@ -122,6 +122,9 @@ func validateNames(names *NamesConfigSpec) error {
|
||||
if names.ImpersonationSignerSecret == "" {
|
||||
missingNames = append(missingNames, "impersonationSignerSecret")
|
||||
}
|
||||
if names.AgentServiceAccount == "" {
|
||||
missingNames = append(missingNames, "agentServiceAccount")
|
||||
}
|
||||
if len(missingNames) > 0 {
|
||||
return constable.Error("missing required names: " + strings.Join(missingNames, ", "))
|
||||
}
|
||||
|
||||
@@ -43,6 +43,7 @@ func TestFromPath(t *testing.T) {
|
||||
impersonationCACertificateSecret: impersonationCACertificateSecret-value
|
||||
impersonationSignerSecret: impersonationSignerSecret-value
|
||||
impersonationSignerSecret: impersonationSignerSecret-value
|
||||
agentServiceAccount: agentServiceAccount-value
|
||||
labels:
|
||||
myLabelKey1: myLabelValue1
|
||||
myLabelKey2: myLabelValue2
|
||||
@@ -72,6 +73,7 @@ func TestFromPath(t *testing.T) {
|
||||
ImpersonationTLSCertificateSecret: "impersonationTLSCertificateSecret-value",
|
||||
ImpersonationCACertificateSecret: "impersonationCACertificateSecret-value",
|
||||
ImpersonationSignerSecret: "impersonationSignerSecret-value",
|
||||
AgentServiceAccount: "agentServiceAccount-value",
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"myLabelKey1": "myLabelValue1",
|
||||
@@ -98,6 +100,7 @@ func TestFromPath(t *testing.T) {
|
||||
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
|
||||
impersonationCACertificateSecret: impersonationCACertificateSecret-value
|
||||
impersonationSignerSecret: impersonationSignerSecret-value
|
||||
agentServiceAccount: agentServiceAccount-value
|
||||
`),
|
||||
wantConfig: &Config{
|
||||
DiscoveryInfo: DiscoveryInfoSpec{
|
||||
@@ -119,6 +122,7 @@ func TestFromPath(t *testing.T) {
|
||||
ImpersonationTLSCertificateSecret: "impersonationTLSCertificateSecret-value",
|
||||
ImpersonationCACertificateSecret: "impersonationCACertificateSecret-value",
|
||||
ImpersonationSignerSecret: "impersonationSignerSecret-value",
|
||||
AgentServiceAccount: "agentServiceAccount-value",
|
||||
},
|
||||
Labels: map[string]string{},
|
||||
KubeCertAgentConfig: KubeCertAgentSpec{
|
||||
@@ -133,7 +137,7 @@ func TestFromPath(t *testing.T) {
|
||||
wantError: "validate names: missing required names: servingCertificateSecret, credentialIssuer, " +
|
||||
"apiService, impersonationConfigMap, impersonationLoadBalancerService, " +
|
||||
"impersonationTLSCertificateSecret, impersonationCACertificateSecret, " +
|
||||
"impersonationSignerSecret",
|
||||
"impersonationSignerSecret, agentServiceAccount",
|
||||
},
|
||||
{
|
||||
name: "Missing apiService name",
|
||||
@@ -147,6 +151,7 @@ func TestFromPath(t *testing.T) {
|
||||
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
|
||||
impersonationCACertificateSecret: impersonationCACertificateSecret-value
|
||||
impersonationSignerSecret: impersonationSignerSecret-value
|
||||
agentServiceAccount: agentServiceAccount-value
|
||||
`),
|
||||
wantError: "validate names: missing required names: apiService",
|
||||
},
|
||||
@@ -162,6 +167,7 @@ func TestFromPath(t *testing.T) {
|
||||
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
|
||||
impersonationCACertificateSecret: impersonationCACertificateSecret-value
|
||||
impersonationSignerSecret: impersonationSignerSecret-value
|
||||
agentServiceAccount: agentServiceAccount-value
|
||||
`),
|
||||
wantError: "validate names: missing required names: credentialIssuer",
|
||||
},
|
||||
@@ -177,6 +183,7 @@ func TestFromPath(t *testing.T) {
|
||||
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
|
||||
impersonationCACertificateSecret: impersonationCACertificateSecret-value
|
||||
impersonationSignerSecret: impersonationSignerSecret-value
|
||||
agentServiceAccount: agentServiceAccount-value
|
||||
`),
|
||||
wantError: "validate names: missing required names: servingCertificateSecret",
|
||||
},
|
||||
@@ -192,6 +199,7 @@ func TestFromPath(t *testing.T) {
|
||||
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
|
||||
impersonationCACertificateSecret: impersonationCACertificateSecret-value
|
||||
impersonationSignerSecret: impersonationSignerSecret-value
|
||||
agentServiceAccount: agentServiceAccount-value
|
||||
`),
|
||||
wantError: "validate names: missing required names: impersonationConfigMap",
|
||||
},
|
||||
@@ -207,6 +215,7 @@ func TestFromPath(t *testing.T) {
|
||||
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
|
||||
impersonationCACertificateSecret: impersonationCACertificateSecret-value
|
||||
impersonationSignerSecret: impersonationSignerSecret-value
|
||||
agentServiceAccount: agentServiceAccount-value
|
||||
`),
|
||||
wantError: "validate names: missing required names: impersonationLoadBalancerService",
|
||||
},
|
||||
@@ -222,6 +231,7 @@ func TestFromPath(t *testing.T) {
|
||||
impersonationLoadBalancerService: impersonationLoadBalancerService-value
|
||||
impersonationCACertificateSecret: impersonationCACertificateSecret-value
|
||||
impersonationSignerSecret: impersonationSignerSecret-value
|
||||
agentServiceAccount: agentServiceAccount-value
|
||||
`),
|
||||
wantError: "validate names: missing required names: impersonationTLSCertificateSecret",
|
||||
},
|
||||
@@ -237,6 +247,7 @@ func TestFromPath(t *testing.T) {
|
||||
impersonationLoadBalancerService: impersonationLoadBalancerService-value
|
||||
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
|
||||
impersonationSignerSecret: impersonationSignerSecret-value
|
||||
agentServiceAccount: agentServiceAccount-value
|
||||
`),
|
||||
wantError: "validate names: missing required names: impersonationCACertificateSecret",
|
||||
},
|
||||
@@ -252,6 +263,7 @@ func TestFromPath(t *testing.T) {
|
||||
impersonationLoadBalancerService: impersonationLoadBalancerService-value
|
||||
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
|
||||
impersonationCACertificateSecret: impersonationCACertificateSecret-value
|
||||
agentServiceAccount: agentServiceAccount-value
|
||||
`),
|
||||
wantError: "validate names: missing required names: impersonationSignerSecret",
|
||||
},
|
||||
@@ -265,6 +277,7 @@ func TestFromPath(t *testing.T) {
|
||||
apiService: pinniped-api
|
||||
impersonationLoadBalancerService: impersonationLoadBalancerService-value
|
||||
impersonationSignerSecret: impersonationSignerSecret-value
|
||||
agentServiceAccount: agentServiceAccount-value
|
||||
`),
|
||||
wantError: "validate names: missing required names: impersonationConfigMap, " +
|
||||
"impersonationTLSCertificateSecret, impersonationCACertificateSecret",
|
||||
|
||||
@@ -41,6 +41,7 @@ type NamesConfigSpec struct {
|
||||
ImpersonationTLSCertificateSecret string `json:"impersonationTLSCertificateSecret"`
|
||||
ImpersonationCACertificateSecret string `json:"impersonationCACertificateSecret"`
|
||||
ImpersonationSignerSecret string `json:"impersonationSignerSecret"`
|
||||
AgentServiceAccount string `json:"agentServiceAccount"`
|
||||
}
|
||||
|
||||
// ServingCertificateConfigSpec contains the configuration knobs for the API's
|
||||
|
||||
@@ -5,7 +5,9 @@
|
||||
package authenticator
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
|
||||
auth1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/authentication/v1alpha1"
|
||||
)
|
||||
@@ -22,8 +24,18 @@ type Closer interface {
|
||||
// nil CA bundle will be returned. If the provided spec contains a CA bundle that is not properly
|
||||
// encoded, an error will be returned.
|
||||
func CABundle(spec *auth1alpha1.TLSSpec) ([]byte, error) {
|
||||
if spec == nil {
|
||||
if spec == nil || len(spec.CertificateAuthorityData) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
return base64.StdEncoding.DecodeString(spec.CertificateAuthorityData)
|
||||
|
||||
pem, err := base64.StdEncoding.DecodeString(spec.CertificateAuthorityData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ok := x509.NewCertPool().AppendCertsFromPEM(pem); !ok {
|
||||
return nil, fmt.Errorf("certificateAuthorityData is not valid PEM")
|
||||
}
|
||||
|
||||
return pem, nil
|
||||
}
|
||||
|
||||
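For readers who want to try the stricter CA bundle check outside this codebase, here is a hedged, dependency-free sketch of the same validation; the function name decodeCABundle is illustrative and not part of the diff.

```go
// Standalone sketch mirroring the validation added to CABundle above:
// base64-decode the bundle and require at least one parseable PEM certificate.
package main

import (
	"crypto/x509"
	"encoding/base64"
	"fmt"
)

func decodeCABundle(certificateAuthorityData string) ([]byte, error) {
	if len(certificateAuthorityData) == 0 {
		return nil, nil // empty means "no custom CA", which is allowed
	}
	pem, err := base64.StdEncoding.DecodeString(certificateAuthorityData)
	if err != nil {
		return nil, err
	}
	if ok := x509.NewCertPool().AppendCertsFromPEM(pem); !ok {
		return nil, fmt.Errorf("certificateAuthorityData is not valid PEM")
	}
	return pem, nil
}

func main() {
	_, err := decodeCABundle(base64.StdEncoding.EncodeToString([]byte("bad data")))
	fmt.Println(err) // certificateAuthorityData is not valid PEM
}
```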
@@ -16,6 +16,7 @@ import (
|
||||
loginapi "go.pinniped.dev/generated/latest/apis/concierge/login"
|
||||
"go.pinniped.dev/internal/constable"
|
||||
"go.pinniped.dev/internal/plog"
|
||||
"go.pinniped.dev/internal/valuelesscontext"
|
||||
)
|
||||
|
||||
// ErrNoSuchAuthenticator is returned by Cache.AuthenticateTokenCredentialRequest() when the requested authenticator is not configured.
|
||||
@@ -101,7 +102,7 @@ func (c *Cache) AuthenticateTokenCredentialRequest(ctx context.Context, req *log
|
||||
|
||||
// The incoming context could have an audience. Since we do not want to handle audiences right now, do not pass it
|
||||
// through directly to the authentication webhook.
|
||||
ctx = valuelessContext{ctx}
|
||||
ctx = valuelesscontext.New(ctx)
|
||||
|
||||
// Call the selected authenticator.
|
||||
resp, authenticated, err := val.AuthenticateToken(ctx, req.Spec.Token)
|
||||
@@ -119,7 +120,3 @@ func (c *Cache) AuthenticateTokenCredentialRequest(ctx context.Context, req *log
|
||||
}
|
||||
return respUser, nil
|
||||
}
|
||||
|
||||
type valuelessContext struct{ context.Context }
|
||||
|
||||
func (valuelessContext) Value(interface{}) interface{} { return nil }
|
||||
|
||||
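The local valuelessContext type removed above is superseded by the shared internal/valuelesscontext package used earlier in this diff (valuelesscontext.New(ctx)). As a rough, hedged sketch, that package presumably amounts to little more than the following; its exact contents are not shown in this diff.

```go
// Hedged sketch of what internal/valuelesscontext plausibly contains: a context
// wrapper that keeps cancellation and deadlines from the parent but hides all
// values (for example, an audience value that should not reach a delegate).
package valuelesscontext

import "context"

func New(ctx context.Context) context.Context { return valuelessContext{ctx} }

type valuelessContext struct{ context.Context }

// Value always returns nil, so no value stored on the parent context leaks through.
func (valuelessContext) Value(interface{}) interface{} { return nil }
```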
@@ -16,6 +16,7 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -25,6 +26,7 @@ import (
|
||||
"gopkg.in/square/go-jose.v2/jwt"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apiserver/pkg/authentication/authenticator"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
|
||||
@@ -345,15 +347,6 @@ func TestController(t *testing.T) {
|
||||
return // end of test unless we wanted to run tests on the resulting authenticator from the cache
|
||||
}
|
||||
|
||||
// The implementation of AuthenticateToken() that we use waits 10 seconds after creation to
|
||||
// perform OIDC discovery. Therefore, the JWTAuthenticator is not functional for the first 10
|
||||
// seconds. We sleep for 13 seconds in this unit test to give a little bit of cushion to that 10
|
||||
// second delay.
|
||||
//
|
||||
// We should get rid of this 10 second delay. See
|
||||
// https://github.com/vmware-tanzu/pinniped/issues/260.
|
||||
time.Sleep(time.Second * 13)
|
||||
|
||||
// We expected the cache to have an entry, so pull that entry from the cache and test it.
|
||||
expectedCacheKey := authncache.Key{
|
||||
APIGroup: auth1alpha1.GroupName,
|
||||
@@ -428,7 +421,17 @@ func TestController(t *testing.T) {
|
||||
tt.wantUsernameClaim,
|
||||
username,
|
||||
)
|
||||
rsp, authenticated, err := cachedAuthenticator.AuthenticateToken(context.Background(), jwt)
|
||||
|
||||
// Loop for a while here to allow the underlying OIDC authenticator to initialize itself asynchronously.
|
||||
var (
|
||||
rsp *authenticator.Response
|
||||
authenticated bool
|
||||
err error
|
||||
)
|
||||
_ = wait.PollImmediate(10*time.Millisecond, 5*time.Second, func() (bool, error) {
|
||||
rsp, authenticated, err = cachedAuthenticator.AuthenticateToken(context.Background(), jwt)
|
||||
return !isNotInitialized(err), nil
|
||||
})
|
||||
if test.wantErrorRegexp != "" {
|
||||
require.Error(t, err)
|
||||
require.Regexp(t, test.wantErrorRegexp, err.Error())
|
||||
@@ -443,6 +446,12 @@ func TestController(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// isNotInitialized checks if the error is the internally-defined "oidc: authenticator not initialized" error from
|
||||
// the underlying OIDC authenticator, which is initialized asynchronously.
|
||||
func isNotInitialized(err error) bool {
|
||||
return err != nil && strings.Contains(err.Error(), "authenticator not initialized")
|
||||
}
|
||||
|
||||
func testTableForAuthenticateTokenTests(
|
||||
t *testing.T,
|
||||
goodRSASigningKey *rsa.PrivateKey,
|
||||
|
||||
@@ -135,6 +135,15 @@ func TestNewWebhookAuthenticator(t *testing.T) {
|
||||
require.EqualError(t, err, "invalid TLS configuration: illegal base64 data at input byte 7")
|
||||
})
|
||||
|
||||
t.Run("invalid pem data", func(t *testing.T) {
|
||||
res, err := newWebhookAuthenticator(&auth1alpha1.WebhookAuthenticatorSpec{
|
||||
Endpoint: "https://example.com",
|
||||
TLS: &auth1alpha1.TLSSpec{CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte("bad data"))},
|
||||
}, ioutil.TempFile, clientcmd.WriteToFile)
|
||||
require.Nil(t, res)
|
||||
require.EqualError(t, err, "invalid TLS configuration: certificateAuthorityData is not valid PEM")
|
||||
})
|
||||
|
||||
t.Run("valid config with no TLS spec", func(t *testing.T) {
|
||||
res, err := newWebhookAuthenticator(&auth1alpha1.WebhookAuthenticatorSpec{
|
||||
Endpoint: "https://example.com",
|
||||
|
||||
@@ -1,219 +0,0 @@
|
||||
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package kubecertagent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
corev1informers "k8s.io/client-go/informers/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
pinnipedclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
|
||||
pinnipedcontroller "go.pinniped.dev/internal/controller"
|
||||
"go.pinniped.dev/internal/controller/issuerconfig"
|
||||
"go.pinniped.dev/internal/controllerlib"
|
||||
"go.pinniped.dev/internal/plog"
|
||||
)
|
||||
|
||||
// These constants are the default values for the kube-controller-manager flags. If the flags are
|
||||
// not properly set on the kube-controller-manager process, then we will fallback to using these.
|
||||
const (
|
||||
k8sAPIServerCACertPEMDefaultPath = "/etc/kubernetes/ca/ca.pem"
|
||||
k8sAPIServerCAKeyPEMDefaultPath = "/etc/kubernetes/ca/ca.key"
|
||||
)
|
||||
|
||||
type annotaterController struct {
|
||||
agentPodConfig *AgentPodConfig
|
||||
credentialIssuerLocationConfig *CredentialIssuerLocationConfig
|
||||
credentialIssuerLabels map[string]string
|
||||
clock clock.Clock
|
||||
k8sClient kubernetes.Interface
|
||||
pinnipedAPIClient pinnipedclientset.Interface
|
||||
kubeSystemPodInformer corev1informers.PodInformer
|
||||
agentPodInformer corev1informers.PodInformer
|
||||
}
|
||||
|
||||
// NewAnnotaterController returns a controller that updates agent pods with the path to the kube
|
||||
// API's certificate and key.
|
||||
//
|
||||
// This controller will add annotations to agent pods with the best-guess paths to the kube API's
|
||||
// certificate and key.
|
||||
//
|
||||
// It also is tasked with updating the CredentialIssuer, located via the provided
|
||||
// credentialIssuerLocationConfig, with any errors that it encounters.
|
||||
func NewAnnotaterController(
|
||||
agentPodConfig *AgentPodConfig,
|
||||
credentialIssuerLocationConfig *CredentialIssuerLocationConfig,
|
||||
credentialIssuerLabels map[string]string,
|
||||
clock clock.Clock,
|
||||
k8sClient kubernetes.Interface,
|
||||
pinnipedAPIClient pinnipedclientset.Interface,
|
||||
kubeSystemPodInformer corev1informers.PodInformer,
|
||||
agentPodInformer corev1informers.PodInformer,
|
||||
withInformer pinnipedcontroller.WithInformerOptionFunc,
|
||||
) controllerlib.Controller {
|
||||
return controllerlib.New(
|
||||
controllerlib.Config{
|
||||
Name: "kube-cert-agent-annotater-controller",
|
||||
Syncer: &annotaterController{
|
||||
agentPodConfig: agentPodConfig,
|
||||
credentialIssuerLocationConfig: credentialIssuerLocationConfig,
|
||||
credentialIssuerLabels: credentialIssuerLabels,
|
||||
clock: clock,
|
||||
k8sClient: k8sClient,
|
||||
pinnipedAPIClient: pinnipedAPIClient,
|
||||
kubeSystemPodInformer: kubeSystemPodInformer,
|
||||
agentPodInformer: agentPodInformer,
|
||||
},
|
||||
},
|
||||
withInformer(
|
||||
kubeSystemPodInformer,
|
||||
pinnipedcontroller.SimpleFilterWithSingletonQueue(isControllerManagerPod),
|
||||
controllerlib.InformerOption{},
|
||||
),
|
||||
withInformer(
|
||||
agentPodInformer,
|
||||
pinnipedcontroller.SimpleFilterWithSingletonQueue(isAgentPod),
|
||||
controllerlib.InformerOption{},
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
// Sync implements controllerlib.Syncer.
|
||||
func (c *annotaterController) Sync(ctx controllerlib.Context) error {
|
||||
agentPods, err := c.agentPodInformer.
|
||||
Lister().
|
||||
Pods(c.agentPodConfig.Namespace).
|
||||
List(c.agentPodConfig.AgentSelector())
|
||||
if err != nil {
|
||||
return fmt.Errorf("informer cannot list agent pods: %w", err)
|
||||
}
|
||||
|
||||
for _, agentPod := range agentPods {
|
||||
controllerManagerPod, err := findControllerManagerPodForSpecificAgentPod(agentPod, c.kubeSystemPodInformer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if controllerManagerPod == nil {
|
||||
// The deleter will clean this orphaned agent.
|
||||
continue
|
||||
}
|
||||
|
||||
certPath := getContainerArgByName(
|
||||
controllerManagerPod,
|
||||
"cluster-signing-cert-file",
|
||||
k8sAPIServerCACertPEMDefaultPath,
|
||||
)
|
||||
keyPath := getContainerArgByName(
|
||||
controllerManagerPod,
|
||||
"cluster-signing-key-file",
|
||||
k8sAPIServerCAKeyPEMDefaultPath,
|
||||
)
|
||||
if err := c.maybeUpdateAgentPod(
|
||||
ctx.Context,
|
||||
agentPod.Name,
|
||||
agentPod.Namespace,
|
||||
certPath,
|
||||
keyPath,
|
||||
); err != nil {
|
||||
err = fmt.Errorf("cannot update agent pod: %w", err)
|
||||
strategyResultUpdateErr := issuerconfig.UpdateStrategy(
|
||||
ctx.Context,
|
||||
c.credentialIssuerLocationConfig.Name,
|
||||
c.credentialIssuerLabels,
|
||||
c.pinnipedAPIClient,
|
||||
strategyError(c.clock, err),
|
||||
)
|
||||
if strategyResultUpdateErr != nil {
|
||||
// If the CI update fails, then we probably want to try again. This controller will get
|
||||
// called again because of the pod create failure, so just try the CI update again then.
|
||||
klog.ErrorS(strategyResultUpdateErr, "could not create or update CredentialIssuer")
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *annotaterController) maybeUpdateAgentPod(
|
||||
ctx context.Context,
|
||||
name string,
|
||||
namespace string,
|
||||
certPath string,
|
||||
keyPath string,
|
||||
) error {
|
||||
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
agentPod, err := c.k8sClient.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if agentPod.Annotations[agentPodCertPathAnnotationKey] != certPath ||
|
||||
agentPod.Annotations[agentPodKeyPathAnnotationKey] != keyPath {
|
||||
if err := c.reallyUpdateAgentPod(
|
||||
ctx,
|
||||
agentPod,
|
||||
certPath,
|
||||
keyPath,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}

func (c *annotaterController) reallyUpdateAgentPod(
    ctx context.Context,
    agentPod *corev1.Pod,
    certPath string,
    keyPath string,
) error {
    // Create a deep copy of the agent pod since it is coming straight from the cache.
    updatedAgentPod := agentPod.DeepCopy()
    if updatedAgentPod.Annotations == nil {
        updatedAgentPod.Annotations = make(map[string]string)
    }
    updatedAgentPod.Annotations[agentPodCertPathAnnotationKey] = certPath
    updatedAgentPod.Annotations[agentPodKeyPathAnnotationKey] = keyPath

    plog.Debug(
        "updating agent pod annotations",
        "pod",
        klog.KObj(updatedAgentPod),
        "certPath",
        certPath,
        "keyPath",
        keyPath,
    )
    _, err := c.k8sClient.
        CoreV1().
        Pods(agentPod.Namespace).
        Update(ctx, updatedAgentPod, metav1.UpdateOptions{})
    return err
}

func getContainerArgByName(pod *corev1.Pod, name, fallbackValue string) string {
    for _, container := range pod.Spec.Containers {
        flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
        flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}
        var val string
        flagset.StringVar(&val, name, "", "")
        _ = flagset.Parse(append(container.Command, container.Args...))
        if val != "" {
            return val
        }
    }
    return fallbackValue
}
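
getContainerArgByName above leans on spf13/pflag's tolerance for unknown flags, so a single flag can be plucked out of whatever command line the kube-controller-manager pod happens to use, in either "--flag value" or "--flag=value" form. Below is a standalone sketch of the same technique; the example command line is illustrative rather than captured from a real cluster, and the fallback path matches the defaults asserted in the tests.

package main

import (
    "fmt"

    "github.com/spf13/pflag"
)

// argByName pulls one --name flag value out of an arbitrary command line while ignoring every
// flag it does not recognize, returning the fallback when the flag is absent.
func argByName(args []string, name, fallback string) string {
    flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
    flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}
    var val string
    flagset.StringVar(&val, name, "", "")
    _ = flagset.Parse(args)
    if val == "" {
        return fallback
    }
    return val
}

func main() {
    command := []string{
        "kube-controller-manager",
        "--use-service-account-credentials=true",
        "--cluster-signing-cert-file", "/etc/kubernetes/pki/ca.crt",
    }
    // Prints /etc/kubernetes/pki/ca.crt; without that flag it would print the fallback path.
    fmt.Println(argByName(command, "cluster-signing-cert-file", "/etc/kubernetes/ca/ca.pem"))
}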
@@ -1,727 +0,0 @@
|
||||
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package kubecertagent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/sclevine/spec"
|
||||
"github.com/sclevine/spec/report"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
kubeinformers "k8s.io/client-go/informers"
|
||||
corev1informers "k8s.io/client-go/informers/core/v1"
|
||||
kubernetesfake "k8s.io/client-go/kubernetes/fake"
|
||||
coretesting "k8s.io/client-go/testing"
|
||||
|
||||
configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
|
||||
pinnipedfake "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
|
||||
"go.pinniped.dev/internal/controllerlib"
|
||||
"go.pinniped.dev/internal/testutil"
|
||||
)
|
||||
|
||||
func TestAnnotaterControllerFilter(t *testing.T) {
|
||||
defineSharedKubecertagentFilterSpecs(
|
||||
t,
|
||||
"AnnotaterControllerFilter",
|
||||
func(
|
||||
agentPodConfig *AgentPodConfig,
|
||||
_ *CredentialIssuerLocationConfig,
|
||||
kubeSystemPodInformer corev1informers.PodInformer,
|
||||
agentPodInformer corev1informers.PodInformer,
|
||||
observableWithInformerOption *testutil.ObservableWithInformerOption,
|
||||
) {
|
||||
_ = NewAnnotaterController(
|
||||
agentPodConfig,
|
||||
nil, // credentialIssuerLabels, shouldn't matter
|
||||
nil, // credentialIssuerLocationConfig, shouldn't matter
|
||||
nil, // clock, shouldn't matter
|
||||
nil, // k8sClient, shouldn't matter
|
||||
nil, // pinnipedClient, shouldn't matter
|
||||
kubeSystemPodInformer,
|
||||
agentPodInformer,
|
||||
observableWithInformerOption.WithInformer,
|
||||
)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestAnnotaterControllerSync(t *testing.T) {
|
||||
spec.Run(t, "AnnotaterControllerSync", func(t *testing.T, when spec.G, it spec.S) {
|
||||
const kubeSystemNamespace = "kube-system"
|
||||
const agentPodNamespace = "agent-pod-namespace"
|
||||
const defaultKubeControllerManagerClusterSigningCertFileFlagValue = "/etc/kubernetes/ca/ca.pem"
|
||||
const defaultKubeControllerManagerClusterSigningKeyFileFlagValue = "/etc/kubernetes/ca/ca.key"
|
||||
const credentialIssuerResourceName = "ci-resource-name"
|
||||
|
||||
const (
|
||||
certPath = "some-cert-path"
|
||||
certPathAnnotation = "kube-cert-agent.pinniped.dev/cert-path"
|
||||
|
||||
keyPath = "some-key-path"
|
||||
keyPathAnnotation = "kube-cert-agent.pinniped.dev/key-path"
|
||||
)
|
||||
|
||||
var r *require.Assertions
|
||||
|
||||
var subject controllerlib.Controller
|
||||
var kubeAPIClient *kubernetesfake.Clientset
|
||||
var kubeSystemInformerClient *kubernetesfake.Clientset
|
||||
var kubeSystemInformers kubeinformers.SharedInformerFactory
|
||||
var agentInformerClient *kubernetesfake.Clientset
|
||||
var agentInformers kubeinformers.SharedInformerFactory
|
||||
var pinnipedAPIClient *pinnipedfake.Clientset
|
||||
var cancelContext context.Context
|
||||
var cancelContextCancelFunc context.CancelFunc
|
||||
var syncContext *controllerlib.Context
|
||||
var controllerManagerPod, agentPod *corev1.Pod
|
||||
var podsGVR schema.GroupVersionResource
|
||||
var credentialIssuerGVR schema.GroupVersionResource
|
||||
var frozenNow time.Time
|
||||
var credentialIssuerLabels map[string]string
|
||||
|
||||
// Defer starting the informers until the last possible moment so that the
|
||||
// nested Before's can keep adding things to the informer caches.
|
||||
var startInformersAndController = func() {
|
||||
// Set this at the last second to allow for injection of server override.
|
||||
subject = NewAnnotaterController(
|
||||
&AgentPodConfig{
|
||||
Namespace: agentPodNamespace,
|
||||
ContainerImage: "some-agent-image",
|
||||
PodNamePrefix: "some-agent-name-",
|
||||
AdditionalLabels: map[string]string{
|
||||
"myLabelKey1": "myLabelValue1",
|
||||
"myLabelKey2": "myLabelValue2",
|
||||
},
|
||||
},
|
||||
&CredentialIssuerLocationConfig{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
credentialIssuerLabels,
|
||||
clock.NewFakeClock(frozenNow),
|
||||
kubeAPIClient,
|
||||
pinnipedAPIClient,
|
||||
kubeSystemInformers.Core().V1().Pods(),
|
||||
agentInformers.Core().V1().Pods(),
|
||||
controllerlib.WithInformer,
|
||||
)
|
||||
|
||||
// Set this at the last second to support calling subject.Name().
|
||||
syncContext = &controllerlib.Context{
|
||||
Context: cancelContext,
|
||||
Name: subject.Name(),
|
||||
Key: controllerlib.Key{
|
||||
Namespace: kubeSystemNamespace,
|
||||
Name: "should-not-matter",
|
||||
},
|
||||
}
|
||||
|
||||
// Must start informers before calling TestRunSynchronously()
|
||||
kubeSystemInformers.Start(cancelContext.Done())
|
||||
agentInformers.Start(cancelContext.Done())
|
||||
controllerlib.TestRunSynchronously(t, subject)
|
||||
}
|
||||
|
||||
it.Before(func() {
|
||||
r = require.New(t)
|
||||
|
||||
kubeAPIClient = kubernetesfake.NewSimpleClientset()
|
||||
|
||||
kubeSystemInformerClient = kubernetesfake.NewSimpleClientset()
|
||||
kubeSystemInformers = kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)
|
||||
|
||||
agentInformerClient = kubernetesfake.NewSimpleClientset()
|
||||
agentInformers = kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)
|
||||
|
||||
pinnipedAPIClient = pinnipedfake.NewSimpleClientset()
|
||||
|
||||
cancelContext, cancelContextCancelFunc = context.WithCancel(context.Background())
|
||||
|
||||
controllerManagerPod, agentPod = exampleControllerManagerAndAgentPods(
|
||||
kubeSystemNamespace, agentPodNamespace, certPath, keyPath,
|
||||
)
|
||||
|
||||
podsGVR = schema.GroupVersionResource{
|
||||
Group: corev1.SchemeGroupVersion.Group,
|
||||
Version: corev1.SchemeGroupVersion.Version,
|
||||
Resource: "pods",
|
||||
}
|
||||
|
||||
credentialIssuerGVR = schema.GroupVersionResource{
|
||||
Group: configv1alpha1.GroupName,
|
||||
Version: configv1alpha1.SchemeGroupVersion.Version,
|
||||
Resource: "credentialissuers",
|
||||
}
|
||||
|
||||
frozenNow = time.Date(2020, time.September, 23, 7, 42, 0, 0, time.Local)
|
||||
|
||||
// Add a pod into the test that doesn't matter to make sure we don't accidentally trigger any
|
||||
// logic on this thing.
|
||||
ignorablePod := corev1.Pod{}
|
||||
ignorablePod.Name = "some-ignorable-pod"
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(&ignorablePod))
|
||||
r.NoError(agentInformerClient.Tracker().Add(&ignorablePod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(&ignorablePod))
|
||||
})
|
||||
|
||||
it.After(func() {
|
||||
cancelContextCancelFunc()
|
||||
})
|
||||
|
||||
when("there is an agent pod without annotations set", func() {
|
||||
it.Before(func() {
|
||||
r.NoError(agentInformerClient.Tracker().Add(agentPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
|
||||
})
|
||||
|
||||
when("there is a matching controller manager pod", func() {
|
||||
it.Before(func() {
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
||||
})
|
||||
|
||||
it("updates the annotations according to the controller manager pod", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
|
||||
updatedAgentPod := agentPod.DeepCopy()
|
||||
updatedAgentPod.Annotations[certPathAnnotation] = certPath
|
||||
updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
|
||||
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
coretesting.NewGetAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod.Name,
|
||||
),
|
||||
coretesting.NewUpdateAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod,
|
||||
),
|
||||
},
|
||||
kubeAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
|
||||
when("updating the agent pod fails", func() {
|
||||
it.Before(func() {
|
||||
kubeAPIClient.PrependReactor(
|
||||
"update",
|
||||
"pods",
|
||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, errors.New("some update error")
|
||||
},
|
||||
)
|
||||
})
|
||||
|
||||
it("returns the error", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
r.EqualError(err, "cannot update agent pod: some update error")
|
||||
})
|
||||
|
||||
when("there is already a CredentialIssuer", func() {
|
||||
var initialCredentialIssuer *configv1alpha1.CredentialIssuer
|
||||
|
||||
it.Before(func() {
|
||||
initialCredentialIssuer = &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
Status: configv1alpha1.CredentialIssuerStatus{
|
||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{},
|
||||
},
|
||||
}
|
||||
r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuer))
|
||||
})
|
||||
|
||||
it("updates the CredentialIssuer status with the error", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
|
||||
expectedCredentialIssuer := initialCredentialIssuer.DeepCopy()
|
||||
expectedCredentialIssuer.Status.Strategies = []configv1alpha1.CredentialIssuerStrategy{
|
||||
{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.ErrorStrategyStatus,
|
||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
||||
Message: "cannot update agent pod: some update error",
|
||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
||||
},
|
||||
}
|
||||
expectedGetAction := coretesting.NewRootGetAction(
|
||||
credentialIssuerGVR,
|
||||
credentialIssuerResourceName,
|
||||
)
|
||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
|
||||
credentialIssuerGVR,
|
||||
"status",
|
||||
expectedCredentialIssuer,
|
||||
)
|
||||
|
||||
r.EqualError(err, "cannot update agent pod: some update error")
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
expectedGetAction,
|
||||
expectedUpdateAction,
|
||||
},
|
||||
pinnipedAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
|
||||
when("updating the CredentialIssuer fails", func() {
|
||||
it.Before(func() {
|
||||
pinnipedAPIClient.PrependReactor(
|
||||
"update",
|
||||
"credentialissuers",
|
||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, errors.New("some update error")
|
||||
},
|
||||
)
|
||||
})
|
||||
|
||||
it("returns the original pod update error so the controller gets scheduled again", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
r.EqualError(err, "cannot update agent pod: some update error")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("there is not already a CredentialIssuer", func() {
|
||||
it.Before(func() {
|
||||
credentialIssuerLabels = map[string]string{"foo": "bar"}
|
||||
})
|
||||
|
||||
it("creates the CredentialIssuer status with the error", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
|
||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
}
|
||||
|
||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
Status: configv1alpha1.CredentialIssuerStatus{
|
||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
||||
{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.ErrorStrategyStatus,
|
||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
||||
Message: "cannot update agent pod: some update error",
|
||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expectedGetAction := coretesting.NewRootGetAction(
|
||||
credentialIssuerGVR,
|
||||
credentialIssuerResourceName,
|
||||
)
|
||||
expectedCreateAction := coretesting.NewRootCreateAction(
|
||||
credentialIssuerGVR,
|
||||
expectedCreateCredentialIssuer,
|
||||
)
|
||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
|
||||
credentialIssuerGVR,
|
||||
"status",
|
||||
expectedCredentialIssuer,
|
||||
)
|
||||
|
||||
r.EqualError(err, "cannot update agent pod: some update error")
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
expectedGetAction,
|
||||
expectedCreateAction,
|
||||
expectedUpdateAction,
|
||||
},
|
||||
pinnipedAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("there is a controller manager pod with CLI flag values separated by spaces", func() {
|
||||
it.Before(func() {
|
||||
controllerManagerPod.Spec.Containers[0].Command = []string{
|
||||
"kube-controller-manager",
|
||||
"--cluster-signing-cert-file", certPath,
|
||||
"--cluster-signing-key-file", keyPath,
|
||||
}
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
||||
})
|
||||
|
||||
it("updates the annotations according to the controller manager pod", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
|
||||
updatedAgentPod := agentPod.DeepCopy()
|
||||
updatedAgentPod.Annotations[certPathAnnotation] = certPath
|
||||
updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
|
||||
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
coretesting.NewGetAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod.Name,
|
||||
),
|
||||
coretesting.NewUpdateAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod,
|
||||
),
|
||||
},
|
||||
kubeAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
when("there is a controller manager pod with no CLI flags", func() {
|
||||
it.Before(func() {
|
||||
controllerManagerPod.Spec.Containers[0].Command = []string{
|
||||
"kube-controller-manager",
|
||||
}
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
||||
})
|
||||
|
||||
it("updates the annotations with the default values", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
|
||||
updatedAgentPod := agentPod.DeepCopy()
|
||||
updatedAgentPod.Annotations[certPathAnnotation] = defaultKubeControllerManagerClusterSigningCertFileFlagValue
|
||||
updatedAgentPod.Annotations[keyPathAnnotation] = defaultKubeControllerManagerClusterSigningKeyFileFlagValue
|
||||
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
coretesting.NewGetAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod.Name,
|
||||
),
|
||||
coretesting.NewUpdateAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod,
|
||||
),
|
||||
},
|
||||
kubeAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
when("there is a controller manager pod with unparsable CLI flags", func() {
|
||||
it.Before(func() {
|
||||
controllerManagerPod.Spec.Containers[0].Command = []string{
|
||||
"kube-controller-manager",
|
||||
"--cluster-signing-cert-file-blah", certPath,
|
||||
"--cluster-signing-key-file-blah", keyPath,
|
||||
}
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
||||
})
|
||||
|
||||
it("updates the annotations with the default values", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
|
||||
updatedAgentPod := agentPod.DeepCopy()
|
||||
updatedAgentPod.Annotations[certPathAnnotation] = defaultKubeControllerManagerClusterSigningCertFileFlagValue
|
||||
updatedAgentPod.Annotations[keyPathAnnotation] = defaultKubeControllerManagerClusterSigningKeyFileFlagValue
|
||||
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
coretesting.NewGetAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod.Name,
|
||||
),
|
||||
coretesting.NewUpdateAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod,
|
||||
),
|
||||
},
|
||||
kubeAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
when("there is a controller manager pod with unparsable cert CLI flag", func() {
|
||||
it.Before(func() {
|
||||
controllerManagerPod.Spec.Containers[0].Command = []string{
|
||||
"kube-controller-manager",
|
||||
"--cluster-signing-cert-file-blah", certPath,
|
||||
"--cluster-signing-key-file", keyPath,
|
||||
}
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
||||
})
|
||||
|
||||
it("updates the key annotation with the default cert flag value", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
|
||||
updatedAgentPod := agentPod.DeepCopy()
|
||||
updatedAgentPod.Annotations[certPathAnnotation] = defaultKubeControllerManagerClusterSigningCertFileFlagValue
|
||||
updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
|
||||
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
coretesting.NewGetAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod.Name,
|
||||
),
|
||||
coretesting.NewUpdateAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod,
|
||||
),
|
||||
},
|
||||
kubeAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
when("there is a controller manager pod with unparsable key CLI flag", func() {
|
||||
it.Before(func() {
|
||||
controllerManagerPod.Spec.Containers[0].Command = []string{
|
||||
"kube-controller-manager",
|
||||
"--cluster-signing-cert-file", certPath,
|
||||
"--cluster-signing-key-file-blah", keyPath,
|
||||
}
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
||||
})
|
||||
|
||||
it("updates the cert annotation with the default key flag value", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
|
||||
updatedAgentPod := agentPod.DeepCopy()
|
||||
updatedAgentPod.Annotations[certPathAnnotation] = certPath
|
||||
updatedAgentPod.Annotations[keyPathAnnotation] = defaultKubeControllerManagerClusterSigningKeyFileFlagValue
|
||||
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
coretesting.NewGetAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod.Name,
|
||||
),
|
||||
coretesting.NewUpdateAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod,
|
||||
),
|
||||
},
|
||||
kubeAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
when("there is a non-matching controller manager pod via uid", func() {
|
||||
it.Before(func() {
|
||||
controllerManagerPod.UID = "some-other-controller-manager-uid"
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
||||
})
|
||||
|
||||
it("does nothing; the deleter will delete this pod to trigger resync", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
r.Equal(
|
||||
[]coretesting.Action{},
|
||||
kubeAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
when("there is a non-matching controller manager pod via name", func() {
|
||||
it.Before(func() {
|
||||
controllerManagerPod.Name = "some-other-controller-manager-name"
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
||||
})
|
||||
|
||||
it("does nothing; the deleter will delete this pod to trigger resync", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
r.Equal(
|
||||
[]coretesting.Action{},
|
||||
kubeAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("there is an agent pod without annotations set which does not have the configured additional labels", func() {
|
||||
it.Before(func() {
|
||||
delete(agentPod.ObjectMeta.Labels, "myLabelKey1")
|
||||
r.NoError(agentInformerClient.Tracker().Add(agentPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
|
||||
})
|
||||
|
||||
when("there is a matching controller manager pod", func() {
|
||||
it.Before(func() {
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
||||
})
|
||||
|
||||
it("updates the annotations according to the controller manager pod", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
|
||||
updatedAgentPod := agentPod.DeepCopy()
|
||||
updatedAgentPod.Annotations[certPathAnnotation] = certPath
|
||||
updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
|
||||
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
coretesting.NewGetAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod.Name,
|
||||
),
|
||||
coretesting.NewUpdateAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod,
|
||||
),
|
||||
},
|
||||
kubeAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("there is an agent pod with correct annotations set", func() {
|
||||
it.Before(func() {
|
||||
agentPod.Annotations = make(map[string]string)
|
||||
agentPod.Annotations[certPathAnnotation] = certPath
|
||||
agentPod.Annotations[keyPathAnnotation] = keyPath
|
||||
r.NoError(agentInformerClient.Tracker().Add(agentPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
|
||||
})
|
||||
|
||||
when("there is a matching controller manager pod", func() {
|
||||
it.Before(func() {
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
||||
})
|
||||
|
||||
it("does nothing since the pod is up to date", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
r.Equal(
|
||||
[]coretesting.Action{},
|
||||
kubeAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("there is an agent pod with the wrong cert annotation", func() {
|
||||
it.Before(func() {
|
||||
agentPod.Annotations[certPathAnnotation] = "wrong"
|
||||
agentPod.Annotations[keyPathAnnotation] = keyPath
|
||||
r.NoError(agentInformerClient.Tracker().Add(agentPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
|
||||
})
|
||||
|
||||
when("there is a matching controller manager pod", func() {
|
||||
it.Before(func() {
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
||||
})
|
||||
|
||||
it("updates the agent with the correct cert annotation", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
|
||||
updatedAgentPod := agentPod.DeepCopy()
|
||||
updatedAgentPod.Annotations[certPathAnnotation] = certPath
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
coretesting.NewGetAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod.Name,
|
||||
),
|
||||
coretesting.NewUpdateAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod,
|
||||
),
|
||||
},
|
||||
kubeAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("there is an agent pod with the wrong key annotation", func() {
|
||||
it.Before(func() {
|
||||
agentPod.Annotations[certPathAnnotation] = certPath
|
||||
agentPod.Annotations[keyPathAnnotation] = "key"
|
||||
r.NoError(agentInformerClient.Tracker().Add(agentPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
|
||||
})
|
||||
|
||||
when("there is a matching controller manager pod", func() {
|
||||
it.Before(func() {
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
||||
})
|
||||
|
||||
it("updates the agent with the correct key annotation", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
|
||||
updatedAgentPod := agentPod.DeepCopy()
|
||||
updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
coretesting.NewGetAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod.Name,
|
||||
),
|
||||
coretesting.NewUpdateAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
updatedAgentPod,
|
||||
),
|
||||
},
|
||||
kubeAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
}, spec.Parallel(), spec.Report(report.Terminal{}))
|
||||
}
|
||||
@@ -1,185 +0,0 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/clock"
    corev1informers "k8s.io/client-go/informers/core/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/klog/v2"

    pinnipedclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
    "go.pinniped.dev/internal/constable"
    pinnipedcontroller "go.pinniped.dev/internal/controller"
    "go.pinniped.dev/internal/controller/issuerconfig"
    "go.pinniped.dev/internal/controllerlib"
    "go.pinniped.dev/internal/plog"
)

type createrController struct {
    agentPodConfig                 *AgentPodConfig
    credentialIssuerLocationConfig *CredentialIssuerLocationConfig
    credentialIssuerLabels         map[string]string
    clock                          clock.Clock
    k8sClient                      kubernetes.Interface
    pinnipedAPIClient              pinnipedclientset.Interface
    kubeSystemPodInformer          corev1informers.PodInformer
    agentPodInformer               corev1informers.PodInformer
}

// NewCreaterController returns a controller that creates new kube-cert-agent pods for every known
// kube-controller-manager pod.
//
// It also is tasked with updating the CredentialIssuer, located via the provided
// credentialIssuerLocationConfig, with any errors that it encounters.
func NewCreaterController(
    agentPodConfig *AgentPodConfig,
    credentialIssuerLocationConfig *CredentialIssuerLocationConfig,
    credentialIssuerLabels map[string]string,
    clock clock.Clock,
    k8sClient kubernetes.Interface,
    pinnipedAPIClient pinnipedclientset.Interface,
    kubeSystemPodInformer corev1informers.PodInformer,
    agentPodInformer corev1informers.PodInformer,
    withInformer pinnipedcontroller.WithInformerOptionFunc,
    withInitialEvent pinnipedcontroller.WithInitialEventOptionFunc,
) controllerlib.Controller {
    return controllerlib.New(
        controllerlib.Config{
            //nolint: misspell
            Name: "kube-cert-agent-creater-controller",
            Syncer: &createrController{
                agentPodConfig:                 agentPodConfig,
                credentialIssuerLocationConfig: credentialIssuerLocationConfig,
                credentialIssuerLabels:         credentialIssuerLabels,
                clock:                          clock,
                k8sClient:                      k8sClient,
                pinnipedAPIClient:              pinnipedAPIClient,
                kubeSystemPodInformer:          kubeSystemPodInformer,
                agentPodInformer:               agentPodInformer,
            },
        },
        withInformer(
            kubeSystemPodInformer,
            pinnipedcontroller.SimpleFilterWithSingletonQueue(isControllerManagerPod),
            controllerlib.InformerOption{},
        ),
        withInformer(
            agentPodInformer,
            pinnipedcontroller.SimpleFilterWithSingletonQueue(isAgentPod),
            controllerlib.InformerOption{},
        ),
        // Be sure to run at least once, even when there are no controller manager pods, so that the
        // CI still gets updated in that case. We should be able to pass an empty key since we don't
        // use the key in the sync (we sync the world).
        withInitialEvent(controllerlib.Key{}),
    )
}

// Sync implements controllerlib.Syncer.
func (c *createrController) Sync(ctx controllerlib.Context) error {
    controllerManagerSelector, err := labels.Parse("component=kube-controller-manager")
    if err != nil {
        return fmt.Errorf("cannot create controller manager selector: %w", err)
    }

    controllerManagerPods, err := c.kubeSystemPodInformer.Lister().List(controllerManagerSelector)
    if err != nil {
        return fmt.Errorf("informer cannot list controller manager pods: %w", err)
    }

    if len(controllerManagerPods) == 0 {
        // If there are no controller manager pods, we alert the user that we can't find the keypair via
        // the CredentialIssuer.
        return issuerconfig.UpdateStrategy(
            ctx.Context,
            c.credentialIssuerLocationConfig.Name,
            c.credentialIssuerLabels,
            c.pinnipedAPIClient,
            strategyError(c.clock, constable.Error("did not find kube-controller-manager pod(s)")),
        )
    }

    for _, controllerManagerPod := range controllerManagerPods {
        agentPod, err := findAgentPodForSpecificControllerManagerPod(
            controllerManagerPod,
            c.kubeSystemPodInformer,
            c.agentPodInformer,
            c.agentPodConfig.AgentSelector(),
        )
        if err != nil {
            return err
        }
        if agentPod == nil {
            agentPod = c.agentPodConfig.newAgentPod(controllerManagerPod)

            plog.Debug(
                "creating agent pod",
                "pod",
                klog.KObj(agentPod),
                "controller",
                klog.KObj(controllerManagerPod),
            )
            _, err := c.k8sClient.CoreV1().
                Pods(c.agentPodConfig.Namespace).
                Create(ctx.Context, agentPod, metav1.CreateOptions{})
            if err != nil {
                err = fmt.Errorf("cannot create agent pod: %w", err)
                strategyResultUpdateErr := issuerconfig.UpdateStrategy(
                    ctx.Context,
                    c.credentialIssuerLocationConfig.Name,
                    c.credentialIssuerLabels,
                    c.pinnipedAPIClient,
                    strategyError(c.clock, err),
                )
                if strategyResultUpdateErr != nil {
                    // If the CI update fails, then we probably want to try again. This controller will get
                    // called again because of the pod create failure, so just try the CI update again then.
                    klog.ErrorS(strategyResultUpdateErr, "could not create or update CredentialIssuer")
                }

                return err
            }
        }

        // The deleter controller handles the case where the expected fields do not match in the agent pod.
    }

    return nil
}

func findAgentPodForSpecificControllerManagerPod(
    controllerManagerPod *corev1.Pod,
    kubeSystemPodInformer corev1informers.PodInformer,
    agentPodInformer corev1informers.PodInformer,
    agentSelector labels.Selector,
) (*corev1.Pod, error) {
    agentPods, err := agentPodInformer.
        Lister().
        List(agentSelector)
    if err != nil {
        return nil, fmt.Errorf("informer cannot list agent pods: %w", err)
    }

    for _, maybeAgentPod := range agentPods {
        maybeControllerManagerPod, err := findControllerManagerPodForSpecificAgentPod(
            maybeAgentPod,
            kubeSystemPodInformer,
        )
        if err != nil {
            return nil, err
        }
        if maybeControllerManagerPod != nil &&
            maybeControllerManagerPod.UID == controllerManagerPod.UID {
            return maybeAgentPod, nil
        }
    }

    return nil, nil
}
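
Each error path in the creater and annotater Sync methods funnels through issuerconfig.UpdateStrategy with a strategy value built by strategyError, which is defined elsewhere in this package and is not part of this diff. Reconstructed only from the CredentialIssuerStrategy entries that the tests below assert, the helper plausibly looks something like the following sketch; this is an editorial guess, not the actual implementation.

package kubecertagent

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/clock"

    configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
)

// strategyErrorSketch builds the error strategy entry that the tests expect to land on the
// CredentialIssuer status; the real strategyError helper may differ in shape or location.
func strategyErrorSketch(clock clock.Clock, err error) configv1alpha1.CredentialIssuerStrategy {
    return configv1alpha1.CredentialIssuerStrategy{
        Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
        Status:         configv1alpha1.ErrorStrategyStatus,
        Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
        Message:        err.Error(),
        LastUpdateTime: metav1.NewTime(clock.Now()),
    }
}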
@@ -1,623 +0,0 @@
|
||||
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package kubecertagent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/sclevine/spec"
|
||||
"github.com/sclevine/spec/report"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
kubeinformers "k8s.io/client-go/informers"
|
||||
corev1informers "k8s.io/client-go/informers/core/v1"
|
||||
kubernetesfake "k8s.io/client-go/kubernetes/fake"
|
||||
coretesting "k8s.io/client-go/testing"
|
||||
|
||||
configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
|
||||
pinnipedfake "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
|
||||
"go.pinniped.dev/internal/controllerlib"
|
||||
"go.pinniped.dev/internal/testutil"
|
||||
)
|
||||
|
||||
func TestCreaterControllerFilter(t *testing.T) {
|
||||
defineSharedKubecertagentFilterSpecs(
|
||||
t,
|
||||
"CreaterControllerFilter",
|
||||
func(
|
||||
agentPodConfig *AgentPodConfig,
|
||||
credentialIssuerLocationConfig *CredentialIssuerLocationConfig,
|
||||
kubeSystemPodInformer corev1informers.PodInformer,
|
||||
agentPodInformer corev1informers.PodInformer,
|
||||
observableWithInformerOption *testutil.ObservableWithInformerOption,
|
||||
) {
|
||||
_ = NewCreaterController(
|
||||
agentPodConfig,
|
||||
credentialIssuerLocationConfig,
|
||||
map[string]string{},
|
||||
nil, // clock, shouldn't matter
|
||||
nil, // k8sClient, shouldn't matter
|
||||
nil, // pinnipedAPIClient, shouldn't matter
|
||||
kubeSystemPodInformer,
|
||||
agentPodInformer,
|
||||
observableWithInformerOption.WithInformer,
|
||||
controllerlib.WithInitialEvent,
|
||||
)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestCreaterControllerInitialEvent(t *testing.T) {
|
||||
kubeSystemInformerClient := kubernetesfake.NewSimpleClientset()
|
||||
kubeSystemInformers := kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)
|
||||
|
||||
agentInformerClient := kubernetesfake.NewSimpleClientset()
|
||||
agentInformers := kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)
|
||||
|
||||
observableWithInitialEventOption := testutil.NewObservableWithInitialEventOption()
|
||||
|
||||
_ = NewCreaterController(
|
||||
nil, // agentPodConfig, shouldn't matter
|
||||
nil, // credentialIssuerLocationConfig, shouldn't matter
|
||||
map[string]string{},
|
||||
nil, // clock, shouldn't matter
|
||||
nil, // k8sClient, shouldn't matter
|
||||
nil, // pinnipedAPIClient, shouldn't matter
|
||||
kubeSystemInformers.Core().V1().Pods(),
|
||||
agentInformers.Core().V1().Pods(),
|
||||
controllerlib.WithInformer,
|
||||
observableWithInitialEventOption.WithInitialEvent,
|
||||
)
|
||||
require.Equal(t, &controllerlib.Key{}, observableWithInitialEventOption.GetInitialEventKey())
|
||||
}
|
||||
|
||||
func TestCreaterControllerSync(t *testing.T) {
|
||||
spec.Run(t, "CreaterControllerSync", func(t *testing.T, when spec.G, it spec.S) {
|
||||
const kubeSystemNamespace = "kube-system"
|
||||
const agentPodNamespace = "agent-pod-namespace"
|
||||
const credentialIssuerResourceName = "ci-resource-name"
|
||||
|
||||
var r *require.Assertions
|
||||
|
||||
var subject controllerlib.Controller
|
||||
var kubeAPIClient *kubernetesfake.Clientset
|
||||
var kubeSystemInformerClient *kubernetesfake.Clientset
|
||||
var kubeSystemInformers kubeinformers.SharedInformerFactory
|
||||
var agentInformerClient *kubernetesfake.Clientset
|
||||
var agentInformers kubeinformers.SharedInformerFactory
|
||||
var pinnipedAPIClient *pinnipedfake.Clientset
|
||||
var cancelContext context.Context
|
||||
var cancelContextCancelFunc context.CancelFunc
|
||||
var syncContext *controllerlib.Context
|
||||
var controllerManagerPod, agentPod *corev1.Pod
|
||||
var podsGVR schema.GroupVersionResource
|
||||
var credentialIssuerGVR schema.GroupVersionResource
|
||||
var frozenNow time.Time
|
||||
|
||||
// Defer starting the informers until the last possible moment so that the
|
||||
// nested Before's can keep adding things to the informer caches.
|
||||
var startInformersAndController = func() {
|
||||
// Set this at the last second to allow for injection of server override.
|
||||
subject = NewCreaterController(
|
||||
&AgentPodConfig{
|
||||
Namespace: agentPodNamespace,
|
||||
ContainerImage: "some-agent-image",
|
||||
PodNamePrefix: "some-agent-name-",
|
||||
ContainerImagePullSecrets: []string{"some-image-pull-secret"},
|
||||
AdditionalLabels: map[string]string{
|
||||
"myLabelKey1": "myLabelValue1",
|
||||
"myLabelKey2": "myLabelValue2",
|
||||
},
|
||||
},
|
||||
&CredentialIssuerLocationConfig{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
map[string]string{
|
||||
"myLabelKey1": "myLabelValue1",
|
||||
"myLabelKey2": "myLabelValue2",
|
||||
},
|
||||
clock.NewFakeClock(frozenNow),
|
||||
kubeAPIClient,
|
||||
pinnipedAPIClient,
|
||||
kubeSystemInformers.Core().V1().Pods(),
|
||||
agentInformers.Core().V1().Pods(),
|
||||
controllerlib.WithInformer,
|
||||
controllerlib.WithInitialEvent,
|
||||
)
|
||||
|
||||
// Set this at the last second to support calling subject.Name().
|
||||
syncContext = &controllerlib.Context{
|
||||
Context: cancelContext,
|
||||
Name: subject.Name(),
|
||||
Key: controllerlib.Key{
|
||||
Namespace: kubeSystemNamespace,
|
||||
Name: "should-not-matter",
|
||||
},
|
||||
}
|
||||
|
||||
// Must start informers before calling TestRunSynchronously()
|
||||
kubeSystemInformers.Start(cancelContext.Done())
|
||||
agentInformers.Start(cancelContext.Done())
|
||||
controllerlib.TestRunSynchronously(t, subject)
|
||||
}
|
||||
|
||||
it.Before(func() {
|
||||
r = require.New(t)
|
||||
|
||||
kubeAPIClient = kubernetesfake.NewSimpleClientset()
|
||||
|
||||
kubeSystemInformerClient = kubernetesfake.NewSimpleClientset()
|
||||
kubeSystemInformers = kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)
|
||||
|
||||
agentInformerClient = kubernetesfake.NewSimpleClientset()
|
||||
agentInformers = kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)
|
||||
|
||||
pinnipedAPIClient = pinnipedfake.NewSimpleClientset()
|
||||
|
||||
cancelContext, cancelContextCancelFunc = context.WithCancel(context.Background())
|
||||
|
||||
controllerManagerPod, agentPod = exampleControllerManagerAndAgentPods(
|
||||
kubeSystemNamespace, agentPodNamespace, "ignored for this test", "ignored for this test",
|
||||
)
|
||||
|
||||
podsGVR = schema.GroupVersionResource{
|
||||
Group: corev1.SchemeGroupVersion.Group,
|
||||
Version: corev1.SchemeGroupVersion.Version,
|
||||
Resource: "pods",
|
||||
}
|
||||
|
||||
credentialIssuerGVR = schema.GroupVersionResource{
|
||||
Group: configv1alpha1.GroupName,
|
||||
Version: configv1alpha1.SchemeGroupVersion.Version,
|
||||
Resource: "credentialissuers",
|
||||
}
|
||||
|
||||
frozenNow = time.Date(2020, time.September, 23, 7, 42, 0, 0, time.Local)
|
||||
|
||||
// Add a pod into the test that doesn't matter to make sure we don't accidentally trigger any
|
||||
// logic on this thing.
|
||||
ignorablePod := corev1.Pod{}
|
||||
ignorablePod.Name = "some-ignorable-pod"
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(&ignorablePod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(&ignorablePod))
|
||||
|
||||
// Add another valid agent pod to make sure our logic works for just the pod we care about.
|
||||
otherAgentPod := agentPod.DeepCopy()
|
||||
otherAgentPod.Name = "some-other-agent"
|
||||
otherAgentPod.Annotations = map[string]string{
|
||||
"kube-cert-agent.pinniped.dev/controller-manager-name": "some-other-controller-manager-name",
|
||||
"kube-cert-agent.pinniped.dev/controller-manager-uid": "some-other-controller-manager-uid",
|
||||
}
|
||||
r.NoError(agentInformerClient.Tracker().Add(otherAgentPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(otherAgentPod))
|
||||
})
|
||||
|
||||
it.After(func() {
|
||||
cancelContextCancelFunc()
|
||||
})
|
||||
|
||||
when("there is a controller manager pod", func() {
|
||||
it.Before(func() {
|
||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
||||
})
|
||||
|
||||
when("there is a matching agent pod", func() {
|
||||
it.Before(func() {
|
||||
r.NoError(agentInformerClient.Tracker().Add(agentPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
|
||||
})
|
||||
|
||||
it("does nothing", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
|
||||
r.NoError(err)
|
||||
r.Empty(kubeAPIClient.Actions())
|
||||
})
|
||||
})
|
||||
|
||||
when("there is a matching agent pod that is missing some of the configured additional labels", func() {
|
||||
it.Before(func() {
|
||||
nonMatchingAgentPod := agentPod.DeepCopy()
|
||||
delete(nonMatchingAgentPod.ObjectMeta.Labels, "myLabelKey1")
|
||||
r.NoError(agentInformerClient.Tracker().Add(nonMatchingAgentPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(nonMatchingAgentPod))
|
||||
})
|
||||
|
||||
it("does nothing because the deleter controller is responsible for deleting it", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
|
||||
r.NoError(err)
|
||||
r.Empty(kubeAPIClient.Actions())
|
||||
})
|
||||
})
|
||||
|
||||
when("there is a non-matching agent pod", func() {
|
||||
it.Before(func() {
|
||||
nonMatchingAgentPod := agentPod.DeepCopy()
|
||||
nonMatchingAgentPod.Name = "some-agent-name-85da432e"
|
||||
nonMatchingAgentPod.Annotations[controllerManagerUIDAnnotationKey] = "some-non-matching-uid"
|
||||
r.NoError(agentInformerClient.Tracker().Add(nonMatchingAgentPod))
|
||||
r.NoError(kubeAPIClient.Tracker().Add(nonMatchingAgentPod))
|
||||
})
|
||||
|
||||
it("creates a matching agent pod", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
|
||||
r.NoError(err)
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
coretesting.NewCreateAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
agentPod,
|
||||
),
|
||||
},
|
||||
kubeAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
when("there is no matching agent pod", func() {
|
||||
it("creates a matching agent pod", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
|
||||
r.NoError(err)
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
coretesting.NewCreateAction(
|
||||
podsGVR,
|
||||
agentPodNamespace,
|
||||
agentPod,
|
||||
),
|
||||
},
|
||||
kubeAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
|
||||
when("creating the matching agent pod fails", func() {
|
||||
it.Before(func() {
|
||||
kubeAPIClient.PrependReactor(
|
||||
"create",
|
||||
"pods",
|
||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, errors.New("some create error")
|
||||
},
|
||||
)
|
||||
})
|
||||
|
||||
when("there is already a CredentialIssuer", func() {
|
||||
var initialCredentialIssuer *configv1alpha1.CredentialIssuer
|
||||
|
||||
it.Before(func() {
|
||||
initialCredentialIssuer = &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
Status: configv1alpha1.CredentialIssuerStatus{
|
||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{},
|
||||
},
|
||||
}
|
||||
r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuer))
|
||||
})
|
||||
|
||||
it("updates the CredentialIssuer status saying that controller manager pods couldn't be found", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
|
||||
expectedCredentialIssuer := initialCredentialIssuer.DeepCopy()
|
||||
expectedCredentialIssuer.Status.Strategies = []configv1alpha1.CredentialIssuerStrategy{
|
||||
{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.ErrorStrategyStatus,
|
||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
||||
Message: "cannot create agent pod: some create error",
|
||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
||||
},
|
||||
}
|
||||
expectedGetAction := coretesting.NewRootGetAction(
|
||||
credentialIssuerGVR,
|
||||
credentialIssuerResourceName,
|
||||
)
|
||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
|
||||
credentialIssuerGVR,
|
||||
"status",
|
||||
expectedCredentialIssuer,
|
||||
)
|
||||
|
||||
r.EqualError(err, "cannot create agent pod: some create error")
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
expectedGetAction,
|
||||
expectedUpdateAction,
|
||||
},
|
||||
pinnipedAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
|
||||
when("the CredentialIssuer operation fails", func() {
|
||||
it.Before(func() {
|
||||
pinnipedAPIClient.PrependReactor(
|
||||
"update",
|
||||
"credentialissuers",
|
||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, errors.New("some update error")
|
||||
},
|
||||
)
|
||||
|
||||
it("still returns the pod create error, since the controller will get rescheduled", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
r.EqualError(err, "cannot create agent pod: some create error")
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("there is not already a CredentialIssuer", func() {
|
||||
it("returns an error and updates the CredentialIssuer status", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
|
||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
Labels: map[string]string{
|
||||
"myLabelKey1": "myLabelValue1",
|
||||
"myLabelKey2": "myLabelValue2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
Labels: map[string]string{
|
||||
"myLabelKey1": "myLabelValue1",
|
||||
"myLabelKey2": "myLabelValue2",
|
||||
},
|
||||
},
|
||||
Status: configv1alpha1.CredentialIssuerStatus{
|
||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
||||
{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.ErrorStrategyStatus,
|
||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
||||
Message: "cannot create agent pod: some create error",
|
||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expectedGetAction := coretesting.NewRootGetAction(
|
||||
credentialIssuerGVR,
|
||||
credentialIssuerResourceName,
|
||||
)
|
||||
expectedCreateAction := coretesting.NewRootCreateAction(
|
||||
credentialIssuerGVR,
|
||||
expectedCreateCredentialIssuer,
|
||||
)
|
||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
|
||||
credentialIssuerGVR,
|
||||
"status",
|
||||
expectedCredentialIssuer,
|
||||
)
|
||||
|
||||
r.EqualError(err, "cannot create agent pod: some create error")
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
expectedGetAction,
|
||||
expectedCreateAction,
|
||||
expectedUpdateAction,
|
||||
},
|
||||
pinnipedAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("there is no controller manager pod", func() {
|
||||
when("there is already a CredentialIssuer", func() {
|
||||
var initialCredentialIssuer *configv1alpha1.CredentialIssuer
|
||||
|
||||
it.Before(func() {
|
||||
initialCredentialIssuer = &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
Status: configv1alpha1.CredentialIssuerStatus{
|
||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{},
|
||||
},
|
||||
}
|
||||
r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuer))
|
||||
})
|
||||
|
||||
it("updates the CredentialIssuer status saying that controller manager pods couldn't be found", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
|
||||
expectedCredentialIssuer := initialCredentialIssuer.DeepCopy()
|
||||
expectedCredentialIssuer.Status.Strategies = []configv1alpha1.CredentialIssuerStrategy{
|
||||
{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.ErrorStrategyStatus,
|
||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
||||
Message: "did not find kube-controller-manager pod(s)",
|
||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
||||
},
|
||||
}
|
||||
expectedGetAction := coretesting.NewRootGetAction(
|
||||
credentialIssuerGVR,
|
||||
credentialIssuerResourceName,
|
||||
)
|
||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
|
||||
credentialIssuerGVR,
|
||||
"status",
|
||||
expectedCredentialIssuer,
|
||||
)
|
||||
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
expectedGetAction,
|
||||
expectedUpdateAction,
|
||||
},
|
||||
pinnipedAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
|
||||
when("when updating the CredentialIssuer fails", func() {
|
||||
it.Before(func() {
|
||||
pinnipedAPIClient.PrependReactor(
|
||||
"update",
|
||||
"credentialissuers",
|
||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, errors.New("some update error")
|
||||
},
|
||||
)
|
||||
})
|
||||
|
||||
it("returns an error", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
r.EqualError(err, "could not create or update credentialissuer: some update error")
|
||||
})
|
||||
})
|
||||
|
||||
when("when getting the CredentialIssuer fails", func() {
|
||||
it.Before(func() {
|
||||
pinnipedAPIClient.PrependReactor(
|
||||
"get",
|
||||
"credentialissuers",
|
||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, errors.New("some get error")
|
||||
},
|
||||
)
|
||||
})
|
||||
|
||||
it("returns an error", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
r.EqualError(err, "could not create or update credentialissuer: get failed: some get error")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("there is not already a CredentialIssuer", func() {
|
||||
it("creates the CredentialIssuer status saying that controller manager pods couldn't be found", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
|
||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
Labels: map[string]string{
|
||||
"myLabelKey1": "myLabelValue1",
|
||||
"myLabelKey2": "myLabelValue2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
Labels: map[string]string{
|
||||
"myLabelKey1": "myLabelValue1",
|
||||
"myLabelKey2": "myLabelValue2",
|
||||
},
|
||||
},
|
||||
Status: configv1alpha1.CredentialIssuerStatus{
|
||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
||||
{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.ErrorStrategyStatus,
|
||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
||||
Message: "did not find kube-controller-manager pod(s)",
|
||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expectedGetAction := coretesting.NewRootGetAction(
|
||||
credentialIssuerGVR,
|
||||
credentialIssuerResourceName,
|
||||
)
|
||||
expectedCreateAction := coretesting.NewRootCreateAction(
|
||||
credentialIssuerGVR,
|
||||
expectedCreateCredentialIssuer,
|
||||
)
|
||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
|
||||
credentialIssuerGVR,
|
||||
"status",
|
||||
expectedCredentialIssuer,
|
||||
)
|
||||
|
||||
r.NoError(err)
|
||||
r.Equal(
|
||||
[]coretesting.Action{
|
||||
expectedGetAction,
|
||||
expectedCreateAction,
|
||||
expectedUpdateAction,
|
||||
},
|
||||
pinnipedAPIClient.Actions(),
|
||||
)
|
||||
})
|
||||
|
||||
when("when creating the CredentialIssuer fails", func() {
|
||||
it.Before(func() {
|
||||
pinnipedAPIClient.PrependReactor(
|
||||
"create",
|
||||
"credentialissuers",
|
||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, errors.New("some create error")
|
||||
},
|
||||
)
|
||||
})
|
||||
|
||||
it("returns an error", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
r.EqualError(err, "could not create or update credentialissuer: create failed: some create error")
|
||||
})
|
||||
})
|
||||
|
||||
when("when getting the CredentialIssuer fails", func() {
|
||||
it.Before(func() {
|
||||
pinnipedAPIClient.PrependReactor(
|
||||
"get",
|
||||
"credentialissuers",
|
||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, errors.New("some get error")
|
||||
},
|
||||
)
|
||||
})
|
||||
|
||||
it("returns an error", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
r.EqualError(err, "could not create or update credentialissuer: get failed: some get error")
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
}, spec.Parallel(), spec.Report(report.Terminal{}))
|
||||
}
|
||||
@@ -1,87 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    corev1informers "k8s.io/client-go/informers/core/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/klog/v2"

    pinnipedcontroller "go.pinniped.dev/internal/controller"
    "go.pinniped.dev/internal/controllerlib"
    "go.pinniped.dev/internal/plog"
)

type deleterController struct {
    agentPodConfig        *AgentPodConfig
    k8sClient             kubernetes.Interface
    kubeSystemPodInformer corev1informers.PodInformer
    agentPodInformer      corev1informers.PodInformer
}

// NewDeleterController returns a controller that deletes any kube-cert-agent pods that are out of
// sync with the known kube-controller-manager pods.
func NewDeleterController(
    agentPodConfig *AgentPodConfig,
    k8sClient kubernetes.Interface,
    kubeSystemPodInformer corev1informers.PodInformer,
    agentPodInformer corev1informers.PodInformer,
    withInformer pinnipedcontroller.WithInformerOptionFunc,
) controllerlib.Controller {
    return controllerlib.New(
        controllerlib.Config{
            Name: "kube-cert-agent-deleter-controller",
            Syncer: &deleterController{
                agentPodConfig:        agentPodConfig,
                k8sClient:             k8sClient,
                kubeSystemPodInformer: kubeSystemPodInformer,
                agentPodInformer:      agentPodInformer,
            },
        },
        withInformer(
            kubeSystemPodInformer,
            pinnipedcontroller.SimpleFilterWithSingletonQueue(isControllerManagerPod),
            controllerlib.InformerOption{},
        ),
        withInformer(
            agentPodInformer,
            pinnipedcontroller.SimpleFilterWithSingletonQueue(isAgentPod),
            controllerlib.InformerOption{},
        ),
    )
}

// Sync implements controllerlib.Syncer.
func (c *deleterController) Sync(ctx controllerlib.Context) error {
    agentPods, err := c.agentPodInformer.
        Lister().
        Pods(c.agentPodConfig.Namespace).
        List(c.agentPodConfig.AgentSelector())
    if err != nil {
        return fmt.Errorf("informer cannot list agent pods: %w", err)
    }

    for _, agentPod := range agentPods {
        controllerManagerPod, err := findControllerManagerPodForSpecificAgentPod(agentPod, c.kubeSystemPodInformer)
        if err != nil {
            return err
        }
        if controllerManagerPod == nil ||
            !isAgentPodUpToDate(agentPod, c.agentPodConfig.newAgentPod(controllerManagerPod)) {
            plog.Debug("deleting agent pod", "pod", klog.KObj(agentPod))
            err := c.k8sClient.
                CoreV1().
                Pods(agentPod.Namespace).
                Delete(ctx.Context, agentPod.Name, metav1.DeleteOptions{})
            if err != nil {
                return fmt.Errorf("cannot delete agent pod: %w", err)
            }
        }
    }

    return nil
}
@@ -1,506 +0,0 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
"context"
"testing"

"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
kubeinformers "k8s.io/client-go/informers"
corev1informers "k8s.io/client-go/informers/core/v1"
kubernetesfake "k8s.io/client-go/kubernetes/fake"
coretesting "k8s.io/client-go/testing"

"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/testutil"
)

func TestDeleterControllerFilter(t *testing.T) {
defineSharedKubecertagentFilterSpecs(
t,
"DeleterControllerFilter",
func(
agentPodConfig *AgentPodConfig,
_ *CredentialIssuerLocationConfig,
kubeSystemPodInformer corev1informers.PodInformer,
agentPodInformer corev1informers.PodInformer,
observableWithInformerOption *testutil.ObservableWithInformerOption,
) {
_ = NewDeleterController(
agentPodConfig,
nil, // k8sClient, shouldn't matter
kubeSystemPodInformer,
agentPodInformer,
observableWithInformerOption.WithInformer,
)
},
)
}

func TestDeleterControllerSync(t *testing.T) {
spec.Run(t, "DeleterControllerSync", func(t *testing.T, when spec.G, it spec.S) {
const kubeSystemNamespace = "kube-system"
const agentPodNamespace = "agent-pod-namespace"

var r *require.Assertions

var subject controllerlib.Controller
var kubeAPIClient *kubernetesfake.Clientset
var kubeSystemInformerClient *kubernetesfake.Clientset
var kubeSystemInformers kubeinformers.SharedInformerFactory
var agentInformerClient *kubernetesfake.Clientset
var agentInformers kubeinformers.SharedInformerFactory
var cancelContext context.Context
var cancelContextCancelFunc context.CancelFunc
var syncContext *controllerlib.Context
var controllerManagerPod, agentPod *corev1.Pod
var podsGVR schema.GroupVersionResource

// Defer starting the informers until the last possible moment so that the
// nested Before's can keep adding things to the informer caches.
var startInformersAndController = func() {
// Set this at the last second to allow for injection of server override.
subject = NewDeleterController(
&AgentPodConfig{
Namespace: agentPodNamespace,
ContainerImage: "some-agent-image",
PodNamePrefix: "some-agent-name-",
AdditionalLabels: map[string]string{
"myLabelKey1": "myLabelValue1",
"myLabelKey2": "myLabelValue2",
},
},
kubeAPIClient,
kubeSystemInformers.Core().V1().Pods(),
agentInformers.Core().V1().Pods(),
controllerlib.WithInformer,
)

// Set this at the last second to support calling subject.Name().
syncContext = &controllerlib.Context{
Context: cancelContext,
Name: subject.Name(),
Key: controllerlib.Key{
Namespace: kubeSystemNamespace,
Name: "should-not-matter",
},
}

// Must start informers before calling TestRunSynchronously()
kubeSystemInformers.Start(cancelContext.Done())
agentInformers.Start(cancelContext.Done())
controllerlib.TestRunSynchronously(t, subject)
}

var requireAgentPodWasDeleted = func() {
r.Equal(
[]coretesting.Action{coretesting.NewDeleteAction(podsGVR, agentPodNamespace, agentPod.Name)},
kubeAPIClient.Actions(),
)
}

it.Before(func() {
r = require.New(t)

cancelContext, cancelContextCancelFunc = context.WithCancel(context.Background())

kubeAPIClient = kubernetesfake.NewSimpleClientset()

kubeSystemInformerClient = kubernetesfake.NewSimpleClientset()
kubeSystemInformers = kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)

agentInformerClient = kubernetesfake.NewSimpleClientset()
agentInformers = kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)

controllerManagerPod, agentPod = exampleControllerManagerAndAgentPods(
kubeSystemNamespace, agentPodNamespace, "ignored for this test", "ignored for this test",
)

podsGVR = schema.GroupVersionResource{
Group: corev1.SchemeGroupVersion.Group,
Version: corev1.SchemeGroupVersion.Version,
Resource: "pods",
}

// Add an pod into the test that doesn't matter to make sure we don't accidentally
// trigger any logic on this thing.
ignorablePod := corev1.Pod{}
ignorablePod.Name = "some-ignorable-pod"
r.NoError(kubeSystemInformerClient.Tracker().Add(&ignorablePod))
r.NoError(agentInformerClient.Tracker().Add(&ignorablePod))
r.NoError(kubeAPIClient.Tracker().Add(&ignorablePod))
})

it.After(func() {
cancelContextCancelFunc()
})

when("there is an agent pod", func() {
it.Before(func() {
r.NoError(agentInformerClient.Tracker().Add(agentPod))
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
})

when("there is a matching controller manager pod", func() {
it.Before(func() {
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})

it("does nothing", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
r.Empty(kubeAPIClient.Actions())
})

when("the agent pod is out of sync with the controller manager via volume mounts", func() {
it.Before(func() {
controllerManagerPod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{Name: "some-other-volume-mount"}}
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("the agent pod is out of sync with the controller manager via volumes", func() {
it.Before(func() {
controllerManagerPod.Spec.Volumes = []corev1.Volume{{Name: "some-other-volume"}}
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("the agent pod is out of sync with the controller manager via node selector", func() {
it.Before(func() {
controllerManagerPod.Spec.NodeSelector = map[string]string{
"some-other-node-selector-key": "some-other-node-selector-value",
}
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("the agent pod is out of sync with the controller manager via node name", func() {
it.Before(func() {
controllerManagerPod.Spec.NodeName = "some-other-node-name"
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("the agent pod is out of sync with the controller manager via tolerations", func() {
it.Before(func() {
controllerManagerPod.Spec.Tolerations = []corev1.Toleration{{Key: "some-other-toleration-key"}}
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("the agent pod is out of sync via restart policy", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Spec.RestartPolicy = corev1.RestartPolicyAlways
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("the agent pod is out of sync via automount service account token", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
t := true
updatedAgentPod.Spec.AutomountServiceAccountToken = &t
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("the agent pod is out of sync with the template via name", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Spec.Containers[0].Name = "some-new-name"
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("the agent pod is out of sync with the template via image", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Spec.Containers[0].Image = "new-image"
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("the agent pod is out of sync with the template via runAsUser", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
notRoot := int64(1234)
updatedAgentPod.Spec.SecurityContext.RunAsUser = &notRoot
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("the agent pod is out of sync with the template via runAsGroup", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
notRoot := int64(1234)
updatedAgentPod.Spec.SecurityContext.RunAsGroup = &notRoot
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("the agent pod is out of sync with the template via having a nil SecurityContext", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Spec.SecurityContext = nil
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("the agent pod is out of sync with the template via labels", func() {
when("an additional label's value was changed", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.ObjectMeta.Labels = map[string]string{
"kube-cert-agent.pinniped.dev": "true",
// the value of a label is wrong so the pod should be deleted so it can get recreated with the new labels
"myLabelKey1": "myLabelValue1-outdated-value",
"myLabelKey2": "myLabelValue2-outdated-value",
}
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("an additional custom label was added since the agent pod was created", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.ObjectMeta.Labels = map[string]string{
"kube-cert-agent.pinniped.dev": "true",
"myLabelKey1": "myLabelValue1",
// "myLabelKey2" is missing so the pod should be deleted so it can get recreated with the new labels
}
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("the agent pod has extra labels that seem unrelated to the additional labels", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.ObjectMeta.Labels = map[string]string{
"kube-cert-agent.pinniped.dev": "true",
"myLabelKey1": "myLabelValue1",
"myLabelKey2": "myLabelValue2",
"extra-label": "not-related-to-the-sepcified-additional-labels",
}
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})

it("does not delete the agent pod because someone else might have put those labels on it", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
r.Empty(kubeAPIClient.Actions())
})
})
})

when("the agent pod is out of sync with the template via command", func() {
it.Before(func() {
updatedAgentPod := agentPod.DeepCopy()
updatedAgentPod.Spec.Containers[0].Command = []string{"some", "new", "command"}
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})
})

when("there is a non-matching controller manager pod via uid", func() {
it.Before(func() {
controllerManagerPod.UID = "some-other-controller-manager-uid"
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("there is a non-matching controller manager pod via name", func() {
it.Before(func() {
controllerManagerPod.Name = "some-other-controller-manager-name"
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
})

it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})

when("there is no matching controller manager pod", func() {
it("deletes the agent pod", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
requireAgentPodWasDeleted()
})
})
})

when("there is no agent pod", func() {
it("does nothing", func() {
startInformersAndController()
err := controllerlib.TestSync(t, subject, *syncContext)

r.NoError(err)
r.Empty(kubeAPIClient.Actions())
})
})
}, spec.Parallel(), spec.Report(report.Terminal{}))
}
@@ -1,232 +0,0 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
"encoding/base64"
"fmt"

v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/errors"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/tools/clientcmd"

configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
pinnipedclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
pinnipedcontroller "go.pinniped.dev/internal/controller"
"go.pinniped.dev/internal/controller/issuerconfig"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/dynamiccert"
)

const (
ClusterInfoNamespace = "kube-public"
clusterInfoName = "cluster-info"
clusterInfoConfigMapKey = "kubeconfig"
)

type execerController struct {
credentialIssuerLocationConfig *CredentialIssuerLocationConfig
credentialIssuerLabels map[string]string
discoveryURLOverride *string
dynamicCertProvider dynamiccert.Private
podCommandExecutor PodCommandExecutor
clock clock.Clock
pinnipedAPIClient pinnipedclientset.Interface
agentPodInformer corev1informers.PodInformer
configMapInformer corev1informers.ConfigMapInformer
}

// NewExecerController returns a controllerlib.Controller that listens for agent pods with proper
// cert/key path annotations and execs into them to get the cert/key material. It sets the retrieved
// key material in a provided dynamicCertProvider.
//
// It also is tasked with updating the CredentialIssuer, located via the provided
// credentialIssuerLocationConfig, with any errors that it encounters.
func NewExecerController(
credentialIssuerLocationConfig *CredentialIssuerLocationConfig,
credentialIssuerLabels map[string]string,
discoveryURLOverride *string,
dynamicCertProvider dynamiccert.Private,
podCommandExecutor PodCommandExecutor,
pinnipedAPIClient pinnipedclientset.Interface,
clock clock.Clock,
agentPodInformer corev1informers.PodInformer,
configMapInformer corev1informers.ConfigMapInformer,
withInformer pinnipedcontroller.WithInformerOptionFunc,
) controllerlib.Controller {
return controllerlib.New(
controllerlib.Config{
Name: "kube-cert-agent-execer-controller",
Syncer: &execerController{
credentialIssuerLocationConfig: credentialIssuerLocationConfig,
credentialIssuerLabels: credentialIssuerLabels,
discoveryURLOverride: discoveryURLOverride,
dynamicCertProvider: dynamicCertProvider,
podCommandExecutor: podCommandExecutor,
pinnipedAPIClient: pinnipedAPIClient,
clock: clock,
agentPodInformer: agentPodInformer,
configMapInformer: configMapInformer,
},
},
withInformer(
agentPodInformer,
pinnipedcontroller.SimpleFilter(isAgentPod, nil), // nil parent func is fine because each event is distinct
controllerlib.InformerOption{},
),
withInformer(
configMapInformer,
pinnipedcontroller.NameAndNamespaceExactMatchFilterFactory(clusterInfoName, ClusterInfoNamespace),
controllerlib.InformerOption{},
),
)
}

func (c *execerController) Sync(ctx controllerlib.Context) error {
maybeAgentPod, err := c.agentPodInformer.Lister().Pods(ctx.Key.Namespace).Get(ctx.Key.Name)
notFound := k8serrors.IsNotFound(err)
if err != nil && !notFound {
return fmt.Errorf("failed to get %s/%s pod: %w", ctx.Key.Namespace, ctx.Key.Name, err)
}
if notFound {
// The pod in question does not exist, so it was probably deleted
return nil
}

certPath, keyPath := c.getKeypairFilePaths(maybeAgentPod)
if certPath == "" || keyPath == "" {
// The annotator controller has not annotated this agent pod yet, or it is not an agent pod at all
return nil
}
agentPod := maybeAgentPod

if agentPod.Status.Phase != v1.PodRunning {
// Seems to be an agent pod, but it is not ready yet
return nil
}

certPEM, err := c.podCommandExecutor.Exec(agentPod.Namespace, agentPod.Name, "cat", certPath)
if err != nil {
strategyResultUpdateErr := issuerconfig.UpdateStrategy(
ctx.Context,
c.credentialIssuerLocationConfig.Name,
c.credentialIssuerLabels,
c.pinnipedAPIClient,
strategyError(c.clock, err),
)
return newAggregate(err, strategyResultUpdateErr)
}

keyPEM, err := c.podCommandExecutor.Exec(agentPod.Namespace, agentPod.Name, "cat", keyPath)
if err != nil {
strategyResultUpdateErr := issuerconfig.UpdateStrategy(
ctx.Context,
c.credentialIssuerLocationConfig.Name,
c.credentialIssuerLabels,
c.pinnipedAPIClient,
strategyError(c.clock, err),
)
return newAggregate(err, strategyResultUpdateErr)
}

if err := c.dynamicCertProvider.SetCertKeyContent([]byte(certPEM), []byte(keyPEM)); err != nil {
err = fmt.Errorf("failed to set signing cert/key content from agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
strategyResultUpdateErr := issuerconfig.UpdateStrategy(
ctx.Context,
c.credentialIssuerLocationConfig.Name,
c.credentialIssuerLabels,
c.pinnipedAPIClient,
strategyError(c.clock, err),
)
return newAggregate(err, strategyResultUpdateErr)
}

apiInfo, err := c.getTokenCredentialRequestAPIInfo()
if err != nil {
strategyResultUpdateErr := issuerconfig.UpdateStrategy(
ctx.Context,
c.credentialIssuerLocationConfig.Name,
c.credentialIssuerLabels,
c.pinnipedAPIClient,
configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotGetClusterInfoStrategyReason,
Message: err.Error(),
LastUpdateTime: metav1.NewTime(c.clock.Now()),
},
)
return newAggregate(err, strategyResultUpdateErr)
}

return issuerconfig.UpdateStrategy(
ctx.Context,
c.credentialIssuerLocationConfig.Name,
c.credentialIssuerLabels,
c.pinnipedAPIClient,
configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.SuccessStrategyStatus,
Reason: configv1alpha1.FetchedKeyStrategyReason,
Message: "Key was fetched successfully",
LastUpdateTime: metav1.NewTime(c.clock.Now()),
Frontend: &configv1alpha1.CredentialIssuerFrontend{
Type: configv1alpha1.TokenCredentialRequestAPIFrontendType,
TokenCredentialRequestAPIInfo: apiInfo,
},
},
)
}

func (c *execerController) getTokenCredentialRequestAPIInfo() (*configv1alpha1.TokenCredentialRequestAPIInfo, error) {
configMap, err := c.configMapInformer.
Lister().
ConfigMaps(ClusterInfoNamespace).
Get(clusterInfoName)
if err != nil {
return nil, fmt.Errorf("failed to get %s configmap: %w", clusterInfoName, err)
}

kubeConfigYAML, kubeConfigPresent := configMap.Data[clusterInfoConfigMapKey]
if !kubeConfigPresent {
return nil, fmt.Errorf("failed to get %s key from %s configmap", clusterInfoConfigMapKey, clusterInfoName)
}

kubeconfig, err := clientcmd.Load([]byte(kubeConfigYAML))
if err != nil {
return nil, fmt.Errorf("failed to load data from %s key in %s configmap", clusterInfoConfigMapKey, clusterInfoName)
}

for _, v := range kubeconfig.Clusters {
result := &configv1alpha1.TokenCredentialRequestAPIInfo{
Server: v.Server,
CertificateAuthorityData: base64.StdEncoding.EncodeToString(v.CertificateAuthorityData),
}
if c.discoveryURLOverride != nil {
result.Server = *c.discoveryURLOverride
}
return result, nil
}
return nil, fmt.Errorf("kubeconfig in %s key in %s configmap did not contain any clusters", clusterInfoConfigMapKey, clusterInfoName)
}

func (c *execerController) getKeypairFilePaths(pod *v1.Pod) (string, string) {
annotations := pod.Annotations
if annotations == nil {
annotations = make(map[string]string)
}

certPath := annotations[agentPodCertPathAnnotationKey]
keyPath := annotations[agentPodKeyPathAnnotationKey]

return certPath, keyPath
}

func newAggregate(errs ...error) error {
return errors.NewAggregate(errs)
}
@@ -1,733 +0,0 @@
|
||||
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package kubecertagent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/sclevine/spec"
|
||||
"github.com/sclevine/spec/report"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
kubeinformers "k8s.io/client-go/informers"
|
||||
kubernetesfake "k8s.io/client-go/kubernetes/fake"
|
||||
coretesting "k8s.io/client-go/testing"
|
||||
|
||||
configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
|
||||
pinnipedfake "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
|
||||
"go.pinniped.dev/internal/controllerlib"
|
||||
"go.pinniped.dev/internal/dynamiccert"
|
||||
"go.pinniped.dev/internal/here"
|
||||
"go.pinniped.dev/internal/testutil"
|
||||
)
|
||||
|
||||
func TestExecerControllerOptions(t *testing.T) {
|
||||
spec.Run(t, "options", func(t *testing.T, when spec.G, it spec.S) {
|
||||
var r *require.Assertions
|
||||
var observableWithInformerOption *testutil.ObservableWithInformerOption
|
||||
var agentPodInformerFilter controllerlib.Filter
|
||||
|
||||
whateverPod := &corev1.Pod{}
|
||||
|
||||
it.Before(func() {
|
||||
r = require.New(t)
|
||||
observableWithInformerOption = testutil.NewObservableWithInformerOption()
|
||||
informerFactory := kubeinformers.NewSharedInformerFactory(nil, 0)
|
||||
agentPodsInformer := informerFactory.Core().V1().Pods()
|
||||
configMapsInformer := informerFactory.Core().V1().ConfigMaps()
|
||||
_ = NewExecerController(
|
||||
&CredentialIssuerLocationConfig{
|
||||
Name: "ignored by this test",
|
||||
},
|
||||
nil, // credentialIssuerLabels, not needed for this test
|
||||
nil, // discoveryURLOverride, not needed for this test
|
||||
nil, // dynamicCertProvider, not needed for this test
|
||||
nil, // podCommandExecutor, not needed for this test
|
||||
nil, // pinnipedAPIClient, not needed for this test
|
||||
nil, // clock, not needed for this test
|
||||
agentPodsInformer,
|
||||
configMapsInformer,
|
||||
observableWithInformerOption.WithInformer,
|
||||
)
|
||||
agentPodInformerFilter = observableWithInformerOption.GetFilterForInformer(agentPodsInformer)
|
||||
})
|
||||
|
||||
when("the change is happening in the agent's namespace", func() {
|
||||
when("a pod with all agent labels is added/updated/deleted", func() {
|
||||
it("returns true", func() {
|
||||
pod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"kube-cert-agent.pinniped.dev": "true",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
r.True(agentPodInformerFilter.Add(pod))
|
||||
r.True(agentPodInformerFilter.Update(whateverPod, pod))
|
||||
r.True(agentPodInformerFilter.Update(pod, whateverPod))
|
||||
r.True(agentPodInformerFilter.Delete(pod))
|
||||
})
|
||||
})
|
||||
|
||||
when("a pod missing the agent label is added/updated/deleted", func() {
|
||||
it("returns false", func() {
|
||||
pod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"some-other-label-key": "some-other-label-value",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
r.False(agentPodInformerFilter.Add(pod))
|
||||
r.False(agentPodInformerFilter.Update(whateverPod, pod))
|
||||
r.False(agentPodInformerFilter.Update(pod, whateverPod))
|
||||
r.False(agentPodInformerFilter.Delete(pod))
|
||||
})
|
||||
})
|
||||
})
|
||||
}, spec.Parallel(), spec.Report(report.Terminal{}))
|
||||
}
|
||||
|
||||
type fakePodExecutor struct {
|
||||
r *require.Assertions
|
||||
|
||||
resultsToReturn []string
|
||||
errorsToReturn []error
|
||||
|
||||
calledWithPodName []string
|
||||
calledWithPodNamespace []string
|
||||
calledWithCommandAndArgs [][]string
|
||||
|
||||
callCount int
|
||||
}
|
||||
|
||||
func (s *fakePodExecutor) Exec(podNamespace string, podName string, commandAndArgs ...string) (string, error) {
|
||||
s.calledWithPodNamespace = append(s.calledWithPodNamespace, podNamespace)
|
||||
s.calledWithPodName = append(s.calledWithPodName, podName)
|
||||
s.calledWithCommandAndArgs = append(s.calledWithCommandAndArgs, commandAndArgs)
|
||||
s.r.Less(s.callCount, len(s.resultsToReturn), "unexpected extra invocation of fakePodExecutor")
|
||||
result := s.resultsToReturn[s.callCount]
|
||||
var err error = nil
|
||||
if s.errorsToReturn != nil {
|
||||
s.r.Less(s.callCount, len(s.errorsToReturn), "unexpected extra invocation of fakePodExecutor")
|
||||
err = s.errorsToReturn[s.callCount]
|
||||
}
|
||||
s.callCount++
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func TestManagerControllerSync(t *testing.T) {
|
||||
name := t.Name()
|
||||
spec.Run(t, "Sync", func(t *testing.T, when spec.G, it spec.S) {
|
||||
const agentPodNamespace = "some-namespace"
|
||||
const agentPodName = "some-agent-pod-name-123"
|
||||
const certPathAnnotationName = "kube-cert-agent.pinniped.dev/cert-path"
|
||||
const keyPathAnnotationName = "kube-cert-agent.pinniped.dev/key-path"
|
||||
const fakeCertPath = "/some/cert/path"
|
||||
const fakeKeyPath = "/some/key/path"
|
||||
const credentialIssuerResourceName = "ci-resource-name"
|
||||
|
||||
var r *require.Assertions
|
||||
|
||||
var subject controllerlib.Controller
|
||||
var cancelContext context.Context
|
||||
var cancelContextCancelFunc context.CancelFunc
|
||||
var syncContext *controllerlib.Context
|
||||
var pinnipedAPIClient *pinnipedfake.Clientset
|
||||
var kubeInformerFactory kubeinformers.SharedInformerFactory
|
||||
var kubeClientset *kubernetesfake.Clientset
|
||||
var fakeExecutor *fakePodExecutor
|
||||
var credentialIssuerLabels map[string]string
|
||||
var discoveryURLOverride *string
|
||||
var dynamicCertProvider dynamiccert.Provider
|
||||
var fakeCertPEM, fakeKeyPEM string
|
||||
var credentialIssuerGVR schema.GroupVersionResource
|
||||
var frozenNow time.Time
|
||||
var defaultDynamicCertProviderCert string
|
||||
var defaultDynamicCertProviderKey string
|
||||
|
||||
// Defer starting the informers until the last possible moment so that the
|
||||
// nested Before's can keep adding things to the informer caches.
|
||||
var startInformersAndController = func() {
|
||||
// Set this at the last second to allow for injection of server override.
|
||||
subject = NewExecerController(
|
||||
&CredentialIssuerLocationConfig{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
credentialIssuerLabels,
|
||||
discoveryURLOverride,
|
||||
dynamicCertProvider,
|
||||
fakeExecutor,
|
||||
pinnipedAPIClient,
|
||||
clock.NewFakeClock(frozenNow),
|
||||
kubeInformerFactory.Core().V1().Pods(),
|
||||
kubeInformerFactory.Core().V1().ConfigMaps(),
|
||||
controllerlib.WithInformer,
|
||||
)
|
||||
|
||||
// Set this at the last second to support calling subject.Name().
|
||||
syncContext = &controllerlib.Context{
|
||||
Context: cancelContext,
|
||||
Name: subject.Name(),
|
||||
Key: controllerlib.Key{
|
||||
Namespace: agentPodNamespace,
|
||||
Name: agentPodName,
|
||||
},
|
||||
}
|
||||
|
||||
// Must start informers before calling TestRunSynchronously()
|
||||
kubeInformerFactory.Start(cancelContext.Done())
|
||||
controllerlib.TestRunSynchronously(t, subject)
|
||||
}
|
||||
|
||||
var newAgentPod = func(agentPodName string, hasCertPathAnnotations bool) *corev1.Pod {
|
||||
pod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: agentPodName,
|
||||
Namespace: agentPodNamespace,
|
||||
Labels: map[string]string{
|
||||
"some-label-key": "some-label-value",
|
||||
},
|
||||
},
|
||||
}
|
||||
if hasCertPathAnnotations {
|
||||
pod.Annotations = map[string]string{
|
||||
certPathAnnotationName: fakeCertPath,
|
||||
keyPathAnnotationName: fakeKeyPath,
|
||||
}
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
var requireDynamicCertProviderHasDefaultValues = func() {
|
||||
actualCertPEM, actualKeyPEM := dynamicCertProvider.CurrentCertKeyContent()
|
||||
r.Equal(defaultDynamicCertProviderCert, string(actualCertPEM))
|
||||
r.Equal(defaultDynamicCertProviderKey, string(actualKeyPEM))
|
||||
}
|
||||
|
||||
var requireNoExternalActionsTaken = func() {
|
||||
r.Empty(pinnipedAPIClient.Actions())
|
||||
r.Zero(fakeExecutor.callCount)
|
||||
requireDynamicCertProviderHasDefaultValues()
|
||||
}
|
||||
|
||||
it.Before(func() {
|
||||
r = require.New(t)
|
||||
|
||||
crt, key, err := testutil.CreateCertificate(
|
||||
time.Now().Add(-time.Hour),
|
||||
time.Now().Add(time.Hour),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defaultDynamicCertProviderCert = string(crt)
|
||||
defaultDynamicCertProviderKey = string(key)
|
||||
|
||||
cancelContext, cancelContextCancelFunc = context.WithCancel(context.Background())
|
||||
pinnipedAPIClient = pinnipedfake.NewSimpleClientset()
|
||||
kubeClientset = kubernetesfake.NewSimpleClientset()
|
||||
kubeInformerFactory = kubeinformers.NewSharedInformerFactory(kubeClientset, 0)
|
||||
fakeExecutor = &fakePodExecutor{r: r}
|
||||
frozenNow = time.Date(2020, time.September, 23, 7, 42, 0, 0, time.Local)
|
||||
dynamicCertProvider = dynamiccert.NewCA(name)
|
||||
err = dynamicCertProvider.SetCertKeyContent([]byte(defaultDynamicCertProviderCert), []byte(defaultDynamicCertProviderKey))
|
||||
r.NoError(err)
|
||||
|
||||
loadFile := func(filename string) string {
|
||||
bytes, err := ioutil.ReadFile(filename)
|
||||
r.NoError(err)
|
||||
return string(bytes)
|
||||
}
|
||||
fakeCertPEM = loadFile("./testdata/test.crt")
|
||||
fakeKeyPEM = loadFile("./testdata/test.key")
|
||||
|
||||
credentialIssuerGVR = schema.GroupVersionResource{
|
||||
Group: configv1alpha1.GroupName,
|
||||
Version: configv1alpha1.SchemeGroupVersion.Version,
|
||||
Resource: "credentialissuers",
|
||||
}
|
||||
})
|
||||
|
||||
it.After(func() {
|
||||
cancelContextCancelFunc()
|
||||
})
|
||||
|
||||
when("there is not yet any agent pods or they were deleted", func() {
|
||||
it.Before(func() {
|
||||
unrelatedPod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "some other pod",
|
||||
Namespace: agentPodNamespace,
|
||||
},
|
||||
}
|
||||
r.NoError(kubeClientset.Tracker().Add(unrelatedPod))
|
||||
startInformersAndController()
|
||||
})
|
||||
|
||||
it("does nothing", func() {
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
requireNoExternalActionsTaken()
|
||||
})
|
||||
})
|
||||
|
||||
when("there is an agent pod, as determined by its labels matching the agent pod template labels, which is not yet annotated by the annotater controller", func() {
|
||||
it.Before(func() {
|
||||
agentPod := newAgentPod(agentPodName, false)
|
||||
r.NoError(kubeClientset.Tracker().Add(agentPod))
|
||||
startInformersAndController()
|
||||
})
|
||||
|
||||
it("does nothing", func() {
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
requireNoExternalActionsTaken()
|
||||
})
|
||||
})
|
||||
|
||||
when("there is an agent pod, as determined by its labels matching the agent pod template labels, and it was annotated by the annotater controller, but it is not Running", func() {
|
||||
it.Before(func() {
|
||||
agentPod := newAgentPod(agentPodName, true)
|
||||
agentPod.Status.Phase = corev1.PodPending // not Running
|
||||
r.NoError(kubeClientset.Tracker().Add(agentPod))
|
||||
startInformersAndController()
|
||||
})
|
||||
|
||||
it("does nothing", func() {
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
requireNoExternalActionsTaken()
|
||||
})
|
||||
})
|
||||
|
||||
when("there is an agent pod, as determined by its labels matching the agent pod template labels, which is already annotated by the annotater controller, and it is Running", func() {
|
||||
it.Before(func() {
|
||||
targetAgentPod := newAgentPod(agentPodName, true)
|
||||
targetAgentPod.Status.Phase = corev1.PodRunning
|
||||
anotherAgentPod := newAgentPod("some-other-agent-pod-which-is-not-the-context-of-this-sync", true)
|
||||
r.NoError(kubeClientset.Tracker().Add(targetAgentPod))
|
||||
r.NoError(kubeClientset.Tracker().Add(anotherAgentPod))
|
||||
})
|
||||
|
||||
when("the resulting pod execs will succeed", func() {
|
||||
it.Before(func() {
|
||||
fakeExecutor.resultsToReturn = []string{fakeCertPEM, fakeKeyPEM}
|
||||
})
|
||||
|
||||
when("the cluster-info ConfigMap is not found", func() {
|
||||
it("returns an error and updates the strategy with an error", func() {
|
||||
startInformersAndController()
|
||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), `failed to get cluster-info configmap: configmap "cluster-info" not found`)
|
||||
|
||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
}
|
||||
|
||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
Status: configv1alpha1.CredentialIssuerStatus{
|
||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
||||
{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.ErrorStrategyStatus,
|
||||
Reason: configv1alpha1.CouldNotGetClusterInfoStrategyReason,
|
||||
Message: `failed to get cluster-info configmap: configmap "cluster-info" not found`,
|
||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
|
||||
expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
|
||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
|
||||
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
|
||||
})
|
||||
})
|
||||
|
||||
when("the cluster-info ConfigMap is missing a key", func() {
|
||||
it.Before(func() {
|
||||
r.NoError(kubeClientset.Tracker().Add(&corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: ClusterInfoNamespace,
|
||||
Name: clusterInfoName,
|
||||
},
|
||||
Data: map[string]string{"uninteresting-key": "uninteresting-value"},
|
||||
}))
|
||||
})
|
||||
it("returns an error", func() {
|
||||
startInformersAndController()
|
||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), `failed to get kubeconfig key from cluster-info configmap`)
|
||||
})
|
||||
})
|
||||
|
||||
when("the cluster-info ConfigMap is contains invalid YAML", func() {
|
||||
it.Before(func() {
|
||||
r.NoError(kubeClientset.Tracker().Add(&corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: ClusterInfoNamespace,
|
||||
Name: clusterInfoName,
|
||||
},
|
||||
Data: map[string]string{"kubeconfig": "invalid-yaml"},
|
||||
}))
|
||||
})
|
||||
it("returns an error", func() {
|
||||
startInformersAndController()
|
||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), `failed to load data from kubeconfig key in cluster-info configmap`)
|
||||
})
|
||||
})
|
||||
|
||||
when("the cluster-info ConfigMap is contains an empty list of clusters", func() {
|
||||
it.Before(func() {
|
||||
r.NoError(kubeClientset.Tracker().Add(&corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: ClusterInfoNamespace,
|
||||
Name: clusterInfoName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"kubeconfig": here.Doc(`
|
||||
kind: Config
|
||||
apiVersion: v1
|
||||
clusters: []
|
||||
`),
|
||||
"uninteresting-key": "uninteresting-value",
|
||||
},
|
||||
}))
|
||||
})
|
||||
it("returns an error", func() {
|
||||
startInformersAndController()
|
||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), `kubeconfig in kubeconfig key in cluster-info configmap did not contain any clusters`)
|
||||
})
|
||||
})
|
||||
|
||||
when("the cluster-info ConfigMap is valid", func() {
|
||||
it.Before(func() {
|
||||
const caData = "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=" // "some-certificate-authority-data" base64 encoded
|
||||
const kubeServerURL = "https://some-server"
|
||||
r.NoError(kubeClientset.Tracker().Add(&corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: ClusterInfoNamespace,
|
||||
Name: clusterInfoName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"kubeconfig": here.Docf(`
|
||||
kind: Config
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- name: ""
|
||||
cluster:
|
||||
certificate-authority-data: "%s"
|
||||
server: "%s"`,
|
||||
caData, kubeServerURL),
|
||||
"uninteresting-key": "uninteresting-value",
|
||||
},
|
||||
}))
|
||||
})
|
||||
|
||||
it("execs to the agent pod to get the keys and updates the dynamic certificates provider with the new certs", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
|
||||
r.Equal(2, fakeExecutor.callCount)
|
||||
|
||||
r.Equal(agentPodNamespace, fakeExecutor.calledWithPodNamespace[0])
|
||||
r.Equal(agentPodName, fakeExecutor.calledWithPodName[0])
|
||||
r.Equal([]string{"cat", fakeCertPath}, fakeExecutor.calledWithCommandAndArgs[0])
|
||||
|
||||
r.Equal(agentPodNamespace, fakeExecutor.calledWithPodNamespace[1])
|
||||
r.Equal(agentPodName, fakeExecutor.calledWithPodName[1])
|
||||
r.Equal([]string{"cat", fakeKeyPath}, fakeExecutor.calledWithCommandAndArgs[1])
|
||||
|
||||
actualCertPEM, actualKeyPEM := dynamicCertProvider.CurrentCertKeyContent()
|
||||
r.Equal(fakeCertPEM, string(actualCertPEM))
|
||||
r.Equal(fakeKeyPEM, string(actualKeyPEM))
|
||||
})
|
||||
|
||||
when("there is already a CredentialIssuer", func() {
|
||||
var initialCredentialIssuer *configv1alpha1.CredentialIssuer
|
||||
|
||||
it.Before(func() {
|
||||
initialCredentialIssuer = &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
Status: configv1alpha1.CredentialIssuerStatus{
|
||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{},
|
||||
},
|
||||
}
|
||||
r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuer))
|
||||
})
|
||||
|
||||
it("also updates the the existing CredentialIssuer status field", func() {
|
||||
startInformersAndController()
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
|
||||
// The first update to the CredentialIssuer will set the strategy entry
|
||||
expectedCredentialIssuer := initialCredentialIssuer.DeepCopy()
|
||||
expectedCredentialIssuer.Status.Strategies = []configv1alpha1.CredentialIssuerStrategy{
|
||||
{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.SuccessStrategyStatus,
|
||||
Reason: configv1alpha1.FetchedKeyStrategyReason,
|
||||
Message: "Key was fetched successfully",
|
||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
||||
Frontend: &configv1alpha1.CredentialIssuerFrontend{
|
||||
Type: configv1alpha1.TokenCredentialRequestAPIFrontendType,
|
||||
TokenCredentialRequestAPIInfo: &configv1alpha1.TokenCredentialRequestAPIInfo{
|
||||
Server: "https://some-server",
|
||||
CertificateAuthorityData: "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expectedCredentialIssuer.Status.KubeConfigInfo = &configv1alpha1.CredentialIssuerKubeConfigInfo{
|
||||
Server: "https://some-server",
|
||||
CertificateAuthorityData: "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=",
|
||||
}
|
||||
expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
|
||||
expectedCreateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
|
||||
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction}, pinnipedAPIClient.Actions())
|
||||
})
|
||||
|
||||
when("updating the CredentialIssuer fails", func() {
|
||||
it.Before(func() {
|
||||
pinnipedAPIClient.PrependReactor(
|
||||
"update",
|
||||
"credentialissuers",
|
||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, errors.New("some update error")
|
||||
},
|
||||
)
|
||||
})
|
||||
|
||||
it("returns an error", func() {
|
||||
startInformersAndController()
|
||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
||||
r.EqualError(err, "could not create or update credentialissuer: some update error")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("there is not already a CredentialIssuer", func() {
|
||||
it.Before(func() {
|
||||
server := "https://overridden-server-url.example.com"
|
||||
discoveryURLOverride = &server
|
||||
credentialIssuerLabels = map[string]string{"foo": "bar"}
|
||||
startInformersAndController()
|
||||
})
|
||||
|
||||
it("also creates the the CredentialIssuer with the appropriate status field and labels", func() {
|
||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
||||
|
||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
}
|
||||
|
||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
Status: configv1alpha1.CredentialIssuerStatus{
|
||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
||||
{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.SuccessStrategyStatus,
|
||||
Reason: configv1alpha1.FetchedKeyStrategyReason,
|
||||
Message: "Key was fetched successfully",
|
||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
||||
Frontend: &configv1alpha1.CredentialIssuerFrontend{
|
||||
Type: configv1alpha1.TokenCredentialRequestAPIFrontendType,
|
||||
TokenCredentialRequestAPIInfo: &configv1alpha1.TokenCredentialRequestAPIInfo{
|
||||
Server: "https://overridden-server-url.example.com",
|
||||
CertificateAuthorityData: "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
KubeConfigInfo: &configv1alpha1.CredentialIssuerKubeConfigInfo{
|
||||
Server: "https://overridden-server-url.example.com",
|
||||
CertificateAuthorityData: "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=",
|
||||
},
|
||||
},
|
||||
}
|
||||
expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
|
||||
expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
|
||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
|
||||
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("the first resulting pod exec will fail", func() {
|
||||
var podExecErrorMessage string
|
||||
|
||||
it.Before(func() {
|
||||
podExecErrorMessage = "some pod exec error message"
|
||||
fakeExecutor.errorsToReturn = []error{fmt.Errorf(podExecErrorMessage), nil}
|
||||
fakeExecutor.resultsToReturn = []string{"", fakeKeyPEM}
|
||||
startInformersAndController()
|
||||
})
|
||||
|
||||
it("does not update the dynamic certificates provider", func() {
|
||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)
|
||||
requireDynamicCertProviderHasDefaultValues()
|
||||
})
|
||||
|
||||
it("creates or updates the the CredentialIssuer status field with an error", func() {
|
||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)
|
||||
|
||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
}
|
||||
|
||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
Status: configv1alpha1.CredentialIssuerStatus{
|
||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
||||
{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.ErrorStrategyStatus,
|
||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
||||
Message: podExecErrorMessage,
|
||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
|
||||
expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
|
||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
|
||||
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
|
||||
})
|
||||
})
|
||||
|
||||
when("the second resulting pod exec will fail", func() {
|
||||
var podExecErrorMessage string
|
||||
|
||||
it.Before(func() {
|
||||
podExecErrorMessage = "some pod exec error message"
|
||||
fakeExecutor.errorsToReturn = []error{nil, fmt.Errorf(podExecErrorMessage)}
|
||||
fakeExecutor.resultsToReturn = []string{fakeCertPEM, ""}
|
||||
startInformersAndController()
|
||||
})
|
||||
|
||||
it("does not update the dynamic certificates provider", func() {
|
||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)
|
||||
requireDynamicCertProviderHasDefaultValues()
|
||||
})
|
||||
|
||||
it("creates or updates the the CredentialIssuer status field with an error", func() {
|
||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)
|
||||
|
||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
}
|
||||
|
||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
Status: configv1alpha1.CredentialIssuerStatus{
|
||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
||||
{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.ErrorStrategyStatus,
|
||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
||||
Message: podExecErrorMessage,
|
||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
|
||||
expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
|
||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
|
||||
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
|
||||
})
|
||||
})
|
||||
|
||||
when("the third resulting pod exec has invalid key data", func() {
|
||||
var keyParseErrorMessage string
|
||||
|
||||
it.Before(func() {
|
||||
keyParseErrorMessage = "failed to set signing cert/key content from agent pod some-namespace/some-agent-pod-name-123: TestManagerControllerSync: attempt to set invalid key pair: tls: failed to find any PEM data in key input"
|
||||
fakeExecutor.errorsToReturn = []error{nil, nil}
|
||||
fakeExecutor.resultsToReturn = []string{fakeCertPEM, ""}
|
||||
startInformersAndController()
|
||||
})
|
||||
|
||||
it("does not update the dynamic certificates provider", func() {
|
||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), keyParseErrorMessage)
|
||||
requireDynamicCertProviderHasDefaultValues()
|
||||
})
|
||||
|
||||
it("creates or updates the the CredentialIssuer status field with an error", func() {
|
||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), keyParseErrorMessage)
|
||||
|
||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
}
|
||||
|
||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: credentialIssuerResourceName,
|
||||
},
|
||||
Status: configv1alpha1.CredentialIssuerStatus{
|
||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
||||
{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.ErrorStrategyStatus,
|
||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
||||
Message: keyParseErrorMessage,
|
||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
|
||||
expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
|
||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
|
||||
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
|
||||
})
|
||||
})
|
||||
})
|
||||
}, spec.Parallel(), spec.Report(report.Terminal{}))
|
||||
}
|
||||
@@ -1,296 +1,532 @@
|
||||
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
|
||||
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package kubecertagent provides controllers that ensure a set of pods (the kube-cert-agent), is
|
||||
// colocated with the Kubernetes controller manager so that Pinniped can access its signing keys.
|
||||
//
|
||||
// Note: the controllers use a filter that accepts all pods that look like the controller manager or
|
||||
// an agent pod, across any add/update/delete event. Each of the controllers only care about a
|
||||
// subset of these events in reality, but the liberal filter implementation serves as an MVP.
|
||||
// Package kubecertagent provides controllers that ensure a pod (the kube-cert-agent) is
|
||||
// co-located with the Kubernetes controller manager so that Pinniped can access its signing keys.
|
||||
package kubecertagent
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/spf13/pflag"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/equality"
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/cache"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
appsv1informers "k8s.io/client-go/informers/apps/v1"
|
||||
corev1informers "k8s.io/client-go/informers/core/v1"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/klog/v2/klogr"
|
||||
"k8s.io/utils/pointer"
|
||||
|
||||
configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
|
||||
"go.pinniped.dev/internal/plog"
|
||||
pinnipedcontroller "go.pinniped.dev/internal/controller"
|
||||
"go.pinniped.dev/internal/controller/issuerconfig"
|
||||
"go.pinniped.dev/internal/controllerlib"
|
||||
"go.pinniped.dev/internal/dynamiccert"
|
||||
"go.pinniped.dev/internal/kubeclient"
|
||||
)
|
||||
|
||||
const (
|
||||
// ControllerManagerNamespace is the assumed namespace of the kube-controller-manager pod(s).
|
||||
ControllerManagerNamespace = "kube-system"
|
||||
|
||||
// controllerManagerNameAnnotationKey is used to store an agent pod's parent's name, i.e., the
|
||||
// name of the controller manager pod with which it is supposed to be in sync.
|
||||
controllerManagerNameAnnotationKey = "kube-cert-agent.pinniped.dev/controller-manager-name"
|
||||
// controllerManagerUIDAnnotationKey is used to store an agent pod's parent's UID, i.e., the UID
|
||||
// of the controller manager pod with which it is supposed to be in sync.
|
||||
controllerManagerUIDAnnotationKey = "kube-cert-agent.pinniped.dev/controller-manager-uid"
|
||||
|
||||
// agentPodLabelKey is used to identify which pods are created by the kube-cert-agent
|
||||
// controllers.
|
||||
agentPodLabelKey = "kube-cert-agent.pinniped.dev"
|
||||
agentPodLabelValue = "true"
|
||||
agentPodLabelValue = "v2"
|
||||
|
||||
// agentPodCertPathAnnotationKey is the annotation that the kube-cert-agent pod will use
|
||||
// to communicate the in-pod path to the kube API's certificate.
|
||||
agentPodCertPathAnnotationKey = "kube-cert-agent.pinniped.dev/cert-path"
|
||||
|
||||
// agentPodKeyPathAnnotationKey is the annotation that the kube-cert-agent pod will use
|
||||
// to communicate the in-pod path to the kube API's key.
|
||||
agentPodKeyPathAnnotationKey = "kube-cert-agent.pinniped.dev/key-path"
|
||||
ClusterInfoNamespace = "kube-public"
|
||||
clusterInfoName = "cluster-info"
|
||||
clusterInfoConfigMapKey = "kubeconfig"
|
||||
)
|
||||
|
||||
type AgentPodConfig struct {
|
||||
// The namespace in which agent pods will be created.
|
||||
// AgentConfig is the configuration for the kube-cert-agent controller.
|
||||
type AgentConfig struct {
|
||||
// Namespace in which agent pods will be created.
|
||||
Namespace string
|
||||
|
||||
// The container image used for the agent pods.
|
||||
// ContainerImage specifies the container image used for the agent pods.
|
||||
ContainerImage string
|
||||
|
||||
// The name prefix for each of the agent pods.
|
||||
PodNamePrefix string
|
||||
// NamePrefix will be prefixed to all agent pod names.
|
||||
NamePrefix string
|
||||
|
||||
// ServiceAccountName is the service account under which to run the agent pods.
|
||||
ServiceAccountName string
|
||||
|
||||
// ContainerImagePullSecrets is a list of names of Kubernetes Secret objects that will be used as
|
||||
// ImagePullSecrets on the kube-cert-agent pods.
|
||||
ContainerImagePullSecrets []string
|
||||
|
||||
// Additional labels that should be added to every agent pod during creation.
|
||||
AdditionalLabels map[string]string
|
||||
// CredentialIssuerName specifies the CredentialIssuer to be created/updated.
|
||||
CredentialIssuerName string
|
||||
|
||||
// Labels to be applied to the CredentialIssuer and agent pods.
|
||||
Labels map[string]string
|
||||
|
||||
// DiscoveryURLOverride is the Kubernetes server endpoint to report in the CredentialIssuer, overriding any
|
||||
// value discovered in the kube-public/cluster-info ConfigMap.
|
||||
DiscoveryURLOverride *string
|
||||
}
|
||||
|
||||
type CredentialIssuerLocationConfig struct {
|
||||
// The resource name for the CredentialIssuer to be created/updated.
|
||||
Name string
|
||||
}
|
||||
|
||||
func (c *AgentPodConfig) Labels() map[string]string {
|
||||
allLabels := map[string]string{
|
||||
agentPodLabelKey: agentPodLabelValue,
|
||||
}
|
||||
for k, v := range c.AdditionalLabels {
|
||||
func (a *AgentConfig) agentLabels() map[string]string {
|
||||
allLabels := map[string]string{agentPodLabelKey: agentPodLabelValue}
|
||||
for k, v := range a.Labels {
|
||||
allLabels[k] = v
|
||||
}
|
||||
return allLabels
|
||||
}
|
||||
|
||||
func (c *AgentPodConfig) AgentSelector() labels.Selector {
|
||||
return labels.SelectorFromSet(map[string]string{agentPodLabelKey: agentPodLabelValue})
|
||||
func (a *AgentConfig) deploymentName() string {
|
||||
return strings.TrimSuffix(a.NamePrefix, "-")
|
||||
}
|
||||
|
||||
func (c *AgentPodConfig) newAgentPod(controllerManagerPod *corev1.Pod) *corev1.Pod {
|
||||
terminateImmediately := int64(0)
|
||||
rootID := int64(0)
|
||||
f := false
|
||||
falsePtr := &f
|
||||
|
||||
imagePullSecrets := []corev1.LocalObjectReference{}
|
||||
for _, imagePullSecret := range c.ContainerImagePullSecrets {
|
||||
imagePullSecrets = append(
|
||||
imagePullSecrets,
|
||||
corev1.LocalObjectReference{
|
||||
Name: imagePullSecret,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
return &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s%s", c.PodNamePrefix, hash(controllerManagerPod)),
|
||||
Namespace: c.Namespace,
|
||||
Labels: c.Labels(),
|
||||
Annotations: map[string]string{
|
||||
controllerManagerNameAnnotationKey: controllerManagerPod.Name,
|
||||
controllerManagerUIDAnnotationKey: string(controllerManagerPod.UID),
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
TerminationGracePeriodSeconds: &terminateImmediately,
|
||||
ImagePullSecrets: imagePullSecrets,
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "sleeper",
|
||||
Image: c.ContainerImage,
|
||||
ImagePullPolicy: corev1.PullIfNotPresent,
|
||||
Command: []string{"/bin/sleep", "infinity"},
|
||||
VolumeMounts: controllerManagerPod.Spec.Containers[0].VolumeMounts,
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Limits: corev1.ResourceList{
|
||||
corev1.ResourceMemory: resource.MustParse("16Mi"),
|
||||
corev1.ResourceCPU: resource.MustParse("10m"),
|
||||
},
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceMemory: resource.MustParse("16Mi"),
|
||||
corev1.ResourceCPU: resource.MustParse("10m"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: controllerManagerPod.Spec.Volumes,
|
||||
RestartPolicy: corev1.RestartPolicyNever,
|
||||
NodeSelector: controllerManagerPod.Spec.NodeSelector,
|
||||
AutomountServiceAccountToken: falsePtr,
|
||||
NodeName: controllerManagerPod.Spec.NodeName,
|
||||
Tolerations: controllerManagerPod.Spec.Tolerations,
|
||||
// We need to run the agent pod as root since the file permissions
// on the cluster keypair usually restrict access to only root.
|
||||
SecurityContext: &corev1.PodSecurityContext{
|
||||
RunAsUser: &rootID,
|
||||
RunAsGroup: &rootID,
|
||||
},
|
||||
},
|
||||
}
|
||||
type agentController struct {
|
||||
cfg AgentConfig
|
||||
client *kubeclient.Client
|
||||
kubeSystemPods corev1informers.PodInformer
|
||||
agentDeployments appsv1informers.DeploymentInformer
|
||||
agentPods corev1informers.PodInformer
|
||||
kubePublicConfigMaps corev1informers.ConfigMapInformer
|
||||
executor PodCommandExecutor
|
||||
dynamicCertProvider dynamiccert.Private
|
||||
clock clock.Clock
|
||||
log logr.Logger
|
||||
execCache *cache.Expiring
|
||||
}
|
||||
|
||||
func isAgentPodUpToDate(actualAgentPod, expectedAgentPod *corev1.Pod) bool {
|
||||
requiredLabelsAllPresentWithCorrectValues := true
|
||||
actualLabels := actualAgentPod.ObjectMeta.Labels
|
||||
for expectedLabelKey, expectedLabelValue := range expectedAgentPod.ObjectMeta.Labels {
|
||||
if actualLabels[expectedLabelKey] != expectedLabelValue {
|
||||
requiredLabelsAllPresentWithCorrectValues = false
|
||||
break
|
||||
var (
|
||||
// controllerManagerLabels are the Kubernetes labels we expect on the kube-controller-manager Pod.
|
||||
controllerManagerLabels = labels.SelectorFromSet(map[string]string{ //nolint: gochecknoglobals
|
||||
"component": "kube-controller-manager",
|
||||
})
|
||||
|
||||
// agentLabels are the Kubernetes labels we always expect on the kube-cert-agent Pod.
|
||||
agentLabels = labels.SelectorFromSet(map[string]string{ //nolint: gochecknoglobals
|
||||
agentPodLabelKey: agentPodLabelValue,
|
||||
})
|
||||
)
|
||||
|
||||
// NewAgentController returns a controller that manages the kube-cert-agent Deployment. It also is tasked with updating
|
||||
// the CredentialIssuer with any errors that it encounters.
|
||||
func NewAgentController(
|
||||
cfg AgentConfig,
|
||||
client *kubeclient.Client,
|
||||
kubeSystemPods corev1informers.PodInformer,
|
||||
agentDeployments appsv1informers.DeploymentInformer,
|
||||
agentPods corev1informers.PodInformer,
|
||||
kubePublicConfigMaps corev1informers.ConfigMapInformer,
|
||||
dynamicCertProvider dynamiccert.Private,
|
||||
) controllerlib.Controller {
|
||||
return newAgentController(
|
||||
cfg,
|
||||
client,
|
||||
kubeSystemPods,
|
||||
agentDeployments,
|
||||
agentPods,
|
||||
kubePublicConfigMaps,
|
||||
NewPodCommandExecutor(client.JSONConfig, client.Kubernetes),
|
||||
dynamicCertProvider,
|
||||
&clock.RealClock{},
|
||||
cache.NewExpiring(),
|
||||
klogr.New(),
|
||||
)
|
||||
}
|
||||
|
||||
func newAgentController(
|
||||
cfg AgentConfig,
|
||||
client *kubeclient.Client,
|
||||
kubeSystemPods corev1informers.PodInformer,
|
||||
agentDeployments appsv1informers.DeploymentInformer,
|
||||
agentPods corev1informers.PodInformer,
|
||||
kubePublicConfigMaps corev1informers.ConfigMapInformer,
|
||||
podCommandExecutor PodCommandExecutor,
|
||||
dynamicCertProvider dynamiccert.Private,
|
||||
clock clock.Clock,
|
||||
execCache *cache.Expiring,
|
||||
log logr.Logger,
|
||||
options ...controllerlib.Option,
|
||||
) controllerlib.Controller {
|
||||
return controllerlib.New(
|
||||
controllerlib.Config{
|
||||
Name: "kube-cert-agent-controller",
|
||||
Syncer: &agentController{
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
kubeSystemPods: kubeSystemPods,
|
||||
agentDeployments: agentDeployments,
|
||||
agentPods: agentPods,
|
||||
kubePublicConfigMaps: kubePublicConfigMaps,
|
||||
executor: podCommandExecutor,
|
||||
dynamicCertProvider: dynamicCertProvider,
|
||||
clock: clock,
|
||||
log: log.WithName("kube-cert-agent-controller"),
|
||||
execCache: execCache,
|
||||
},
|
||||
},
|
||||
append([]controllerlib.Option{
|
||||
controllerlib.WithInformer(
|
||||
kubeSystemPods,
|
||||
pinnipedcontroller.SimpleFilterWithSingletonQueue(func(obj metav1.Object) bool {
|
||||
return controllerManagerLabels.Matches(labels.Set(obj.GetLabels()))
|
||||
}),
|
||||
controllerlib.InformerOption{},
|
||||
),
|
||||
controllerlib.WithInformer(
|
||||
agentDeployments,
|
||||
pinnipedcontroller.SimpleFilterWithSingletonQueue(func(obj metav1.Object) bool {
|
||||
return obj.GetNamespace() == cfg.Namespace && obj.GetName() == cfg.deploymentName()
|
||||
}),
|
||||
controllerlib.InformerOption{},
|
||||
),
|
||||
controllerlib.WithInformer(
|
||||
agentPods,
|
||||
pinnipedcontroller.SimpleFilterWithSingletonQueue(func(obj metav1.Object) bool {
|
||||
return agentLabels.Matches(labels.Set(obj.GetLabels()))
|
||||
}),
|
||||
controllerlib.InformerOption{},
|
||||
),
|
||||
controllerlib.WithInformer(
|
||||
kubePublicConfigMaps,
|
||||
pinnipedcontroller.SimpleFilterWithSingletonQueue(func(obj metav1.Object) bool {
|
||||
return obj.GetNamespace() == ClusterInfoNamespace && obj.GetName() == clusterInfoName
|
||||
}),
|
||||
controllerlib.InformerOption{},
|
||||
),
|
||||
// Be sure to run once even to make sure the CredentialIssuer is updated if there are no controller manager
|
||||
// pods. We should be able to pass an empty key since we don't use the key in the sync (we sync
|
||||
// the world).
|
||||
controllerlib.WithInitialEvent(controllerlib.Key{}),
|
||||
}, options...)...,
|
||||
)
|
||||
}
|
||||
|
||||
// Sync implements controllerlib.Syncer.
|
||||
func (c *agentController) Sync(ctx controllerlib.Context) error {
|
||||
// Find the latest healthy kube-controller-manager Pod in kube-system.
|
||||
controllerManagerPods, err := c.kubeSystemPods.Lister().Pods(ControllerManagerNamespace).List(controllerManagerLabels)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("could not list controller manager pods: %w", err)
|
||||
return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
|
||||
}
|
||||
newestControllerManager := newestRunningPod(controllerManagerPods)
|
||||
|
||||
// If there are no healthy controller manager pods, we alert the user that we can't find the keypair via
|
||||
// the CredentialIssuer.
|
||||
if newestControllerManager == nil {
|
||||
err := fmt.Errorf("could not find a healthy kube-controller-manager pod (%s)", pluralize(controllerManagerPods))
|
||||
return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
|
||||
}
|
||||
|
||||
if err := c.createOrUpdateDeployment(ctx, newestControllerManager); err != nil {
|
||||
err := fmt.Errorf("could not ensure agent deployment: %w", err)
|
||||
return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
|
||||
}
|
||||
|
||||
// Find the latest healthy agent Pod in our namespace.
|
||||
agentPods, err := c.agentPods.Lister().Pods(c.cfg.Namespace).List(agentLabels)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("could not list agent pods: %w", err)
|
||||
return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
|
||||
}
|
||||
newestAgentPod := newestRunningPod(agentPods)
|
||||
|
||||
// If there are no healthy agent pods, we alert the user that we can't find the keypair via
|
||||
// the CredentialIssuer.
|
||||
if newestAgentPod == nil {
|
||||
err := fmt.Errorf("could not find a healthy agent pod (%s)", pluralize(agentPods))
|
||||
return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
|
||||
}
|
||||
|
||||
// Load the Kubernetes API info from the kube-public/cluster-info ConfigMap.
|
||||
configMap, err := c.kubePublicConfigMaps.Lister().ConfigMaps(ClusterInfoNamespace).Get(clusterInfoName)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to get %s/%s configmap: %w", ClusterInfoNamespace, clusterInfoName, err)
|
||||
return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotGetClusterInfoStrategyReason)
|
||||
}
|
||||
|
||||
apiInfo, err := c.extractAPIInfo(configMap)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("could not extract Kubernetes API endpoint info from %s/%s configmap: %w", ClusterInfoNamespace, clusterInfoName, err)
|
||||
return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotGetClusterInfoStrategyReason)
|
||||
}
|
||||
|
||||
// Load the certificate and key from the agent pod into our in-memory signer.
|
||||
if err := c.loadSigningKey(newestAgentPod); err != nil {
|
||||
return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
|
||||
}
|
||||
|
||||
// Set the CredentialIssuer strategy to successful.
|
||||
return issuerconfig.UpdateStrategy(
|
||||
ctx.Context,
|
||||
c.cfg.CredentialIssuerName,
|
||||
c.cfg.Labels,
|
||||
c.client.PinnipedConcierge,
|
||||
configv1alpha1.CredentialIssuerStrategy{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.SuccessStrategyStatus,
|
||||
Reason: configv1alpha1.FetchedKeyStrategyReason,
|
||||
Message: "key was fetched successfully",
|
||||
LastUpdateTime: metav1.NewTime(c.clock.Now()),
|
||||
Frontend: &configv1alpha1.CredentialIssuerFrontend{
|
||||
Type: configv1alpha1.TokenCredentialRequestAPIFrontendType,
|
||||
TokenCredentialRequestAPIInfo: apiInfo,
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (c *agentController) loadSigningKey(agentPod *corev1.Pod) error {
|
||||
// If we remember successfully loading the key from this pod recently, we can skip this step and return immediately.
|
||||
if _, exists := c.execCache.Get(agentPod.UID); exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Exec into the agent pod and cat out the certificate and the key.
|
||||
combinedPEM, err := c.executor.Exec(
|
||||
agentPod.Namespace, agentPod.Name,
|
||||
"sh", "-c", "cat ${CERT_PATH}; echo; echo; cat ${KEY_PATH}",
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not exec into agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
|
||||
}
|
||||
|
||||
// Split up the output by looking for the block of newlines.
|
||||
var certPEM, keyPEM string
|
||||
if parts := strings.Split(combinedPEM, "\n\n\n"); len(parts) == 2 {
|
||||
certPEM, keyPEM = parts[0], parts[1]
|
||||
}
|
||||
|
||||
// Load the certificate and key into the dynamic signer.
|
||||
if err := c.dynamicCertProvider.SetCertKeyContent([]byte(certPEM), []byte(keyPEM)); err != nil {
|
||||
return fmt.Errorf("failed to set signing cert/key content from agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
|
||||
}
|
||||
|
||||
// Remember that we've successfully loaded the key from this pod so we can skip the exec+load if nothing has changed.
|
||||
c.execCache.Set(agentPod.UID, struct{}{}, 15*time.Minute)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *agentController) createOrUpdateDeployment(ctx controllerlib.Context, newestControllerManager *corev1.Pod) error {
|
||||
// Build the expected Deployment based on the kube-controller-manager Pod as a template.
|
||||
expectedDeployment := c.newAgentDeployment(newestControllerManager)
|
||||
|
||||
// Try to get the existing Deployment, if it exists.
|
||||
existingDeployment, err := c.agentDeployments.Lister().Deployments(expectedDeployment.Namespace).Get(expectedDeployment.Name)
|
||||
notFound := k8serrors.IsNotFound(err)
|
||||
if err != nil && !notFound {
|
||||
return fmt.Errorf("could not get deployments: %w", err)
|
||||
}
|
||||
|
||||
log := c.log.WithValues(
|
||||
"deployment", klog.KObj(expectedDeployment),
|
||||
"templatePod", klog.KObj(newestControllerManager),
|
||||
)
|
||||
|
||||
// If the Deployment did not exist, create it and be done.
|
||||
if notFound {
|
||||
log.Info("creating new deployment")
|
||||
_, err := c.client.Kubernetes.AppsV1().Deployments(expectedDeployment.Namespace).Create(ctx.Context, expectedDeployment, metav1.CreateOptions{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Otherwise update the spec of the Deployment to match our desired state.
|
||||
updatedDeployment := existingDeployment.DeepCopy()
|
||||
updatedDeployment.Spec = expectedDeployment.Spec
|
||||
updatedDeployment.ObjectMeta = mergeLabelsAndAnnotations(updatedDeployment.ObjectMeta, expectedDeployment.ObjectMeta)
|
||||
|
||||
// If the existing Deployment already matches our desired spec, we're done.
|
||||
if apiequality.Semantic.DeepDerivative(updatedDeployment, existingDeployment) {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Info("updating existing deployment")
|
||||
_, err = c.client.Kubernetes.AppsV1().Deployments(updatedDeployment.Namespace).Update(ctx.Context, updatedDeployment, metav1.UpdateOptions{})
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *agentController) failStrategyAndErr(ctx context.Context, err error, reason configv1alpha1.StrategyReason) error {
|
||||
return utilerrors.NewAggregate([]error{err, issuerconfig.UpdateStrategy(
|
||||
ctx,
|
||||
c.cfg.CredentialIssuerName,
|
||||
c.cfg.Labels,
|
||||
c.client.PinnipedConcierge,
|
||||
configv1alpha1.CredentialIssuerStrategy{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.ErrorStrategyStatus,
|
||||
Reason: reason,
|
||||
Message: err.Error(),
|
||||
LastUpdateTime: metav1.NewTime(c.clock.Now()),
|
||||
},
|
||||
)})
|
||||
}
|
||||
|
||||
func (c *agentController) extractAPIInfo(configMap *corev1.ConfigMap) (*configv1alpha1.TokenCredentialRequestAPIInfo, error) {
|
||||
kubeConfigYAML, kubeConfigPresent := configMap.Data[clusterInfoConfigMapKey]
|
||||
if !kubeConfigPresent {
|
||||
return nil, fmt.Errorf("missing %q key", clusterInfoConfigMapKey)
|
||||
}
|
||||
|
||||
kubeconfig, err := clientcmd.Load([]byte(kubeConfigYAML))
|
||||
if err != nil {
|
||||
// We purposefully don't wrap "err" here because it's very verbose.
|
||||
return nil, fmt.Errorf("key %q does not contain a valid kubeconfig", clusterInfoConfigMapKey)
|
||||
}
|
||||
|
||||
for _, v := range kubeconfig.Clusters {
|
||||
result := &configv1alpha1.TokenCredentialRequestAPIInfo{
|
||||
Server: v.Server,
|
||||
CertificateAuthorityData: base64.StdEncoding.EncodeToString(v.CertificateAuthorityData),
|
||||
}
|
||||
if c.cfg.DiscoveryURLOverride != nil {
|
||||
result.Server = *c.cfg.DiscoveryURLOverride
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
return nil, fmt.Errorf("kubeconfig in key %q does not contain any clusters", clusterInfoConfigMapKey)
|
||||
}
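
For illustration (not part of the diff above), a minimal standalone sketch of how clientcmd.Load pulls the server URL and CA bundle out of a cluster-info style kubeconfig, similar to extractAPIInfo; the YAML document and the main package wrapper are hypothetical examples, not Pinniped code:

// Illustration only: extracting the API server endpoint and CA bundle from a
// kube-public/cluster-info style kubeconfig.
package main

import (
	"encoding/base64"
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical cluster-info kubeconfig; real clusters publish one in the
	// kube-public namespace under the "kubeconfig" key.
	kubeconfigYAML := `
apiVersion: v1
kind: Config
clusters:
- name: ""
  cluster:
    server: https://203.0.113.10:6443
    certificate-authority-data: c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=
`
	kubeconfig, err := clientcmd.Load([]byte(kubeconfigYAML))
	if err != nil {
		panic(err)
	}
	for _, cluster := range kubeconfig.Clusters {
		// CertificateAuthorityData was base64-decoded by clientcmd.Load, so re-encode
		// it the same way the controller does when filling in the CredentialIssuer.
		fmt.Println("server:", cluster.Server)
		fmt.Println("ca:", base64.StdEncoding.EncodeToString(cluster.CertificateAuthorityData))
	}
}
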
|
||||
|
||||
// newestRunningPod takes a list of pods and returns the newest one with status.phase == "Running".
|
||||
func newestRunningPod(pods []*corev1.Pod) *corev1.Pod {
|
||||
// Compare two pods based on creation timestamp, breaking ties by name
|
||||
newer := func(a, b *corev1.Pod) bool {
|
||||
if a.CreationTimestamp.Time.Equal(b.CreationTimestamp.Time) {
|
||||
return a.Name < b.Name
|
||||
}
|
||||
return a.CreationTimestamp.After(b.CreationTimestamp.Time)
|
||||
}
|
||||
|
||||
var result *corev1.Pod
|
||||
for _, pod := range pods {
|
||||
if pod.Status.Phase == corev1.PodRunning && (result == nil || newer(pod, result)) {
|
||||
result = pod
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) *appsv1.Deployment {
|
||||
var volumeMounts []corev1.VolumeMount
|
||||
if len(controllerManagerPod.Spec.Containers) > 0 {
|
||||
volumeMounts = controllerManagerPod.Spec.Containers[0].VolumeMounts
|
||||
}
|
||||
|
||||
var imagePullSecrets []corev1.LocalObjectReference
|
||||
if len(c.cfg.ContainerImagePullSecrets) > 0 {
|
||||
imagePullSecrets = make([]corev1.LocalObjectReference, 0, len(c.cfg.ContainerImagePullSecrets))
|
||||
for _, name := range c.cfg.ContainerImagePullSecrets {
|
||||
imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{Name: name})
|
||||
}
|
||||
}
|
||||
|
||||
if actualAgentPod.Spec.SecurityContext == nil {
|
||||
return false
|
||||
}
|
||||
return &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: c.cfg.deploymentName(),
|
||||
Namespace: c.cfg.Namespace,
|
||||
Labels: c.cfg.Labels,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Replicas: pointer.Int32Ptr(1),
|
||||
Selector: metav1.SetAsLabelSelector(c.cfg.agentLabels()),
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: c.cfg.agentLabels(),
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
TerminationGracePeriodSeconds: pointer.Int64Ptr(0),
|
||||
ImagePullSecrets: imagePullSecrets,
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "sleeper",
|
||||
Image: c.cfg.ContainerImage,
|
||||
ImagePullPolicy: corev1.PullIfNotPresent,
|
||||
Command: []string{"/bin/sleep", "infinity"},
|
||||
VolumeMounts: volumeMounts,
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "CERT_PATH", Value: getContainerArgByName(controllerManagerPod, "cluster-signing-cert-file", "/etc/kubernetes/ca/ca.pem")},
|
||||
{Name: "KEY_PATH", Value: getContainerArgByName(controllerManagerPod, "cluster-signing-key-file", "/etc/kubernetes/ca/ca.key")},
|
||||
},
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Limits: corev1.ResourceList{
|
||||
corev1.ResourceMemory: resource.MustParse("16Mi"),
|
||||
corev1.ResourceCPU: resource.MustParse("10m"),
|
||||
},
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceMemory: resource.MustParse("16Mi"),
|
||||
corev1.ResourceCPU: resource.MustParse("10m"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: controllerManagerPod.Spec.Volumes,
|
||||
RestartPolicy: corev1.RestartPolicyAlways,
|
||||
NodeSelector: controllerManagerPod.Spec.NodeSelector,
|
||||
AutomountServiceAccountToken: pointer.BoolPtr(false),
|
||||
ServiceAccountName: c.cfg.ServiceAccountName,
|
||||
NodeName: controllerManagerPod.Spec.NodeName,
|
||||
Tolerations: controllerManagerPod.Spec.Tolerations,
|
||||
// We need to run the agent pod as root since the file permissions
// on the cluster keypair usually restrict access to only root.
|
||||
SecurityContext: &corev1.PodSecurityContext{
|
||||
RunAsUser: pointer.Int64Ptr(0),
|
||||
RunAsGroup: pointer.Int64Ptr(0),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
return requiredLabelsAllPresentWithCorrectValues &&
|
||||
equality.Semantic.DeepEqual(
|
||||
actualAgentPod.Spec.Containers[0].VolumeMounts,
|
||||
expectedAgentPod.Spec.Containers[0].VolumeMounts,
|
||||
) &&
|
||||
equality.Semantic.DeepEqual(
|
||||
actualAgentPod.Spec.Containers[0].Name,
|
||||
expectedAgentPod.Spec.Containers[0].Name,
|
||||
) &&
|
||||
equality.Semantic.DeepEqual(
|
||||
actualAgentPod.Spec.Containers[0].Image,
|
||||
expectedAgentPod.Spec.Containers[0].Image,
|
||||
) &&
|
||||
equality.Semantic.DeepEqual(
|
||||
actualAgentPod.Spec.Containers[0].Command,
|
||||
expectedAgentPod.Spec.Containers[0].Command,
|
||||
) &&
|
||||
equality.Semantic.DeepEqual(
|
||||
actualAgentPod.Spec.Volumes,
|
||||
expectedAgentPod.Spec.Volumes,
|
||||
) &&
|
||||
equality.Semantic.DeepEqual(
|
||||
actualAgentPod.Spec.RestartPolicy,
|
||||
expectedAgentPod.Spec.RestartPolicy,
|
||||
) &&
|
||||
equality.Semantic.DeepEqual(
|
||||
actualAgentPod.Spec.NodeSelector,
|
||||
expectedAgentPod.Spec.NodeSelector,
|
||||
) &&
|
||||
equality.Semantic.DeepEqual(
|
||||
actualAgentPod.Spec.AutomountServiceAccountToken,
|
||||
expectedAgentPod.Spec.AutomountServiceAccountToken,
|
||||
) &&
|
||||
equality.Semantic.DeepEqual(
|
||||
actualAgentPod.Spec.NodeName,
|
||||
expectedAgentPod.Spec.NodeName,
|
||||
) &&
|
||||
equality.Semantic.DeepEqual(
|
||||
actualAgentPod.Spec.Tolerations,
|
||||
expectedAgentPod.Spec.Tolerations,
|
||||
) &&
|
||||
equality.Semantic.DeepEqual(
|
||||
actualAgentPod.Spec.SecurityContext.RunAsUser,
|
||||
expectedAgentPod.Spec.SecurityContext.RunAsUser,
|
||||
) &&
|
||||
equality.Semantic.DeepEqual(
|
||||
actualAgentPod.Spec.SecurityContext.RunAsGroup,
|
||||
expectedAgentPod.Spec.SecurityContext.RunAsGroup,
|
||||
)
|
||||
}
|
||||
|
||||
func isControllerManagerPod(obj metav1.Object) bool {
|
||||
pod, ok := obj.(*corev1.Pod)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if pod.Labels == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
component, ok := pod.Labels["component"]
|
||||
if !ok || component != "kube-controller-manager" {
|
||||
return false
|
||||
}
|
||||
|
||||
if pod.Status.Phase != corev1.PodRunning {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func isAgentPod(obj metav1.Object) bool {
|
||||
value, foundLabel := obj.GetLabels()[agentPodLabelKey]
|
||||
return foundLabel && value == agentPodLabelValue
|
||||
}
|
||||
|
||||
func findControllerManagerPodForSpecificAgentPod(
|
||||
agentPod *corev1.Pod,
|
||||
kubeSystemPodInformer corev1informers.PodInformer,
|
||||
) (*corev1.Pod, error) {
|
||||
name, ok := agentPod.Annotations[controllerManagerNameAnnotationKey]
|
||||
if !ok {
|
||||
plog.Debug("agent pod missing parent name annotation", "pod", agentPod.Name)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
uid, ok := agentPod.Annotations[controllerManagerUIDAnnotationKey]
|
||||
if !ok {
|
||||
plog.Debug("agent pod missing parent uid annotation", "pod", agentPod.Name)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
maybeControllerManagerPod, err := kubeSystemPodInformer.
|
||||
Lister().
|
||||
Pods(ControllerManagerNamespace).
|
||||
Get(name)
|
||||
notFound := k8serrors.IsNotFound(err)
|
||||
if err != nil && !notFound {
|
||||
return nil, fmt.Errorf("cannot get controller pod: %w", err)
|
||||
} else if notFound ||
|
||||
maybeControllerManagerPod == nil ||
|
||||
string(maybeControllerManagerPod.UID) != uid {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return maybeControllerManagerPod, nil
|
||||
}
|
||||
|
||||
func strategyError(clock clock.Clock, err error) configv1alpha1.CredentialIssuerStrategy {
|
||||
return configv1alpha1.CredentialIssuerStrategy{
|
||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
||||
Status: configv1alpha1.ErrorStrategyStatus,
|
||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
||||
Message: err.Error(),
|
||||
LastUpdateTime: metav1.NewTime(clock.Now()),
|
||||
// Setting MinReadySeconds prevents the agent pods from being churned too quickly by the deployments controller.
|
||||
MinReadySeconds: 10,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func hash(controllerManagerPod *corev1.Pod) string {
|
||||
// FNV should be faster than SHA, and we don't care about hash-reversibility here, and Kubernetes
|
||||
// uses FNV for their pod templates, so should be good enough for us?
|
||||
h := fnv.New32a()
|
||||
_, _ = h.Write([]byte(controllerManagerPod.UID)) // Never returns an error, per godoc.
|
||||
return hex.EncodeToString(h.Sum([]byte{}))
|
||||
func mergeLabelsAndAnnotations(existing metav1.ObjectMeta, desired metav1.ObjectMeta) metav1.ObjectMeta {
|
||||
result := existing.DeepCopy()
|
||||
for k, v := range desired.Labels {
|
||||
if result.Labels == nil {
|
||||
result.Labels = map[string]string{}
|
||||
}
|
||||
result.Labels[k] = v
|
||||
}
|
||||
for k, v := range desired.Annotations {
|
||||
if result.Annotations == nil {
|
||||
result.Annotations = map[string]string{}
|
||||
}
|
||||
result.Annotations[k] = v
|
||||
}
|
||||
return *result
|
||||
}
|
||||
|
||||
func getContainerArgByName(pod *corev1.Pod, name, fallbackValue string) string {
|
||||
for _, container := range pod.Spec.Containers {
|
||||
flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
|
||||
flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}
|
||||
var val string
|
||||
flagset.StringVar(&val, name, "", "")
|
||||
_ = flagset.Parse(append(container.Command, container.Args...))
|
||||
if val != "" {
|
||||
return val
|
||||
}
|
||||
}
|
||||
return fallbackValue
|
||||
}
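
For illustration (not part of the diff above), a minimal standalone sketch of the pflag-based flag extraction that getContainerArgByName relies on; the command line shown is a hypothetical kube-controller-manager invocation:

// Illustration only: pulling a single flag value out of a kube-controller-manager
// style command line while tolerating all the flags we do not know about.
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Hypothetical container command; only --cluster-signing-cert-file is of interest.
	command := []string{
		"kube-controller-manager",
		"--use-service-account-credentials=true",
		"--cluster-signing-cert-file=/etc/kubernetes/ca/ca.pem",
	}

	flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
	flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}
	var certPath string
	flagset.StringVar(&certPath, "cluster-signing-cert-file", "", "")
	_ = flagset.Parse(command) // unknown flags are skipped rather than failing the parse

	fmt.Println(certPath) // /etc/kubernetes/ca/ca.pem
}
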
|
||||
|
||||
func pluralize(pods []*corev1.Pod) string {
|
||||
if len(pods) == 1 {
|
||||
return "1 candidate"
|
||||
}
|
||||
return fmt.Sprintf("%d candidates", len(pods))
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large

internal/controller/kubecertagent/legacypodcleaner.go (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package kubecertagent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
corev1informers "k8s.io/client-go/informers/core/v1"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
pinnipedcontroller "go.pinniped.dev/internal/controller"
|
||||
"go.pinniped.dev/internal/controllerlib"
|
||||
"go.pinniped.dev/internal/kubeclient"
|
||||
)
|
||||
|
||||
// NewLegacyPodCleanerController returns a controller that cleans up legacy kube-cert-agent Pods created by Pinniped v0.7.0 and below.
|
||||
func NewLegacyPodCleanerController(
|
||||
cfg AgentConfig,
|
||||
client *kubeclient.Client,
|
||||
agentPods corev1informers.PodInformer,
|
||||
log logr.Logger,
|
||||
options ...controllerlib.Option,
|
||||
) controllerlib.Controller {
|
||||
// legacyAgentLabels are the Kubernetes labels we previously added to agent pods (the new value is "v2").
|
||||
// We also expect these pods to have the "extra" labels configured on the Concierge.
|
||||
legacyAgentLabels := map[string]string{"kube-cert-agent.pinniped.dev": "true"}
|
||||
for k, v := range cfg.Labels {
|
||||
legacyAgentLabels[k] = v
|
||||
}
|
||||
legacyAgentSelector := labels.SelectorFromSet(legacyAgentLabels)
|
||||
|
||||
log = log.WithName("legacy-pod-cleaner-controller")
|
||||
|
||||
return controllerlib.New(
|
||||
controllerlib.Config{
|
||||
Name: "legacy-pod-cleaner-controller",
|
||||
Syncer: controllerlib.SyncFunc(func(ctx controllerlib.Context) error {
|
||||
if err := client.Kubernetes.CoreV1().Pods(ctx.Key.Namespace).Delete(ctx.Context, ctx.Key.Name, metav1.DeleteOptions{}); err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("could not delete legacy agent pod: %w", err)
|
||||
}
|
||||
log.Info("deleted legacy kube-cert-agent pod", "pod", klog.KRef(ctx.Key.Namespace, ctx.Key.Name))
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
append([]controllerlib.Option{
|
||||
controllerlib.WithInformer(
|
||||
agentPods,
|
||||
pinnipedcontroller.SimpleFilter(func(obj metav1.Object) bool {
|
||||
return obj.GetNamespace() == cfg.Namespace && legacyAgentSelector.Matches(labels.Set(obj.GetLabels()))
|
||||
}, nil),
|
||||
controllerlib.InformerOption{},
|
||||
),
|
||||
}, options...)...,
|
||||
)
|
||||
}
|
||||
internal/controller/kubecertagent/legacypodcleaner_test.go (new file, 145 lines)
@@ -0,0 +1,145 @@
|
||||
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package kubecertagent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
coretesting "k8s.io/client-go/testing"
|
||||
|
||||
"go.pinniped.dev/internal/controllerlib"
|
||||
"go.pinniped.dev/internal/kubeclient"
|
||||
"go.pinniped.dev/internal/testutil/testlogger"
|
||||
)
|
||||
|
||||
func TestLegacyPodCleanerController(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
legacyAgentPodWithoutExtraLabel := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "concierge",
|
||||
Name: "pinniped-concierge-kube-cert-agent-without-extra-label",
|
||||
Labels: map[string]string{"kube-cert-agent.pinniped.dev": "true"},
|
||||
},
|
||||
Spec: corev1.PodSpec{},
|
||||
Status: corev1.PodStatus{Phase: corev1.PodRunning},
|
||||
}
|
||||
|
||||
legacyAgentPodWithExtraLabel := legacyAgentPodWithoutExtraLabel.DeepCopy()
|
||||
legacyAgentPodWithExtraLabel.Name = "pinniped-concierge-kube-cert-agent-with-extra-label"
|
||||
legacyAgentPodWithExtraLabel.Labels["extralabel"] = "labelvalue"
|
||||
legacyAgentPodWithExtraLabel.Labels["anotherextralabel"] = "labelvalue"
|
||||
|
||||
nonLegacyAgentPod := legacyAgentPodWithExtraLabel.DeepCopy()
|
||||
nonLegacyAgentPod.Name = "pinniped-concierge-kube-cert-agent-not-legacy"
|
||||
nonLegacyAgentPod.Labels["kube-cert-agent.pinniped.dev"] = "v2"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
kubeObjects []runtime.Object
|
||||
addKubeReactions func(*kubefake.Clientset)
|
||||
wantDistinctErrors []string
|
||||
wantDistinctLogs []string
|
||||
wantActions []coretesting.Action
|
||||
}{
|
||||
{
|
||||
name: "no pods",
|
||||
wantActions: []coretesting.Action{},
|
||||
},
|
||||
{
|
||||
name: "mix of pods",
|
||||
kubeObjects: []runtime.Object{
|
||||
legacyAgentPodWithoutExtraLabel, // should not be deleted (missing extra label)
|
||||
legacyAgentPodWithExtraLabel, // should be deleted
|
||||
nonLegacyAgentPod, // should not be deleted (missing legacy agent label)
|
||||
},
|
||||
wantDistinctErrors: []string{""},
|
||||
wantDistinctLogs: []string{
|
||||
`legacy-pod-cleaner-controller "level"=0 "msg"="deleted legacy kube-cert-agent pod" "pod"={"name":"pinniped-concierge-kube-cert-agent-with-extra-label","namespace":"concierge"}`,
|
||||
},
|
||||
wantActions: []coretesting.Action{ // the first delete triggers the informer again, but the second invocation triggers a Not Found
|
||||
coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
|
||||
coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fail to delete",
|
||||
kubeObjects: []runtime.Object{
|
||||
legacyAgentPodWithoutExtraLabel, // should not be deleted (missing extra label)
|
||||
legacyAgentPodWithExtraLabel, // should be deleted
|
||||
nonLegacyAgentPod, // should not be deleted (missing legacy agent label)
|
||||
},
|
||||
addKubeReactions: func(clientset *kubefake.Clientset) {
|
||||
clientset.PrependReactor("delete", "*", func(action coretesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return true, nil, fmt.Errorf("some delete error")
|
||||
})
|
||||
},
|
||||
wantDistinctErrors: []string{
|
||||
"could not delete legacy agent pod: some delete error",
|
||||
},
|
||||
wantActions: []coretesting.Action{
|
||||
coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
|
||||
coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fail to delete because of not found error",
|
||||
kubeObjects: []runtime.Object{
|
||||
legacyAgentPodWithoutExtraLabel, // should not be deleted (missing extra label)
|
||||
legacyAgentPodWithExtraLabel, // should be deleted
|
||||
nonLegacyAgentPod, // should not be deleted (missing legacy agent label)
|
||||
},
|
||||
addKubeReactions: func(clientset *kubefake.Clientset) {
|
||||
clientset.PrependReactor("delete", "*", func(action coretesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return true, nil, k8serrors.NewNotFound(action.GetResource().GroupResource(), "")
|
||||
})
|
||||
},
|
||||
wantDistinctErrors: []string{""},
|
||||
wantActions: []coretesting.Action{
|
||||
coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
kubeClientset := kubefake.NewSimpleClientset(tt.kubeObjects...)
|
||||
if tt.addKubeReactions != nil {
|
||||
tt.addKubeReactions(kubeClientset)
|
||||
}
|
||||
kubeInformers := informers.NewSharedInformerFactory(kubeClientset, 0)
|
||||
log := testlogger.New(t)
|
||||
controller := NewLegacyPodCleanerController(
|
||||
AgentConfig{
|
||||
Namespace: "concierge",
|
||||
Labels: map[string]string{"extralabel": "labelvalue"},
|
||||
},
|
||||
&kubeclient.Client{Kubernetes: kubeClientset},
|
||||
kubeInformers.Core().V1().Pods(),
|
||||
log,
|
||||
controllerlib.WithMaxRetries(1),
|
||||
)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
errorMessages := runControllerUntilQuiet(ctx, t, controller, kubeInformers)
|
||||
assert.Equal(t, tt.wantDistinctErrors, deduplicate(errorMessages), "unexpected errors")
|
||||
assert.Equal(t, tt.wantDistinctLogs, deduplicate(log.Lines()), "unexpected logs")
|
||||
assert.Equal(t, tt.wantActions, kubeClientset.Actions()[2:], "unexpected actions")
|
||||
})
|
||||
}
|
||||
}
|
||||
internal/controller/kubecertagent/mocks/generate.go (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package mocks
|
||||
|
||||
//go:generate go run -v github.com/golang/mock/mockgen -destination=mockpodcommandexecutor.go -package=mocks -copyright_file=../../../../hack/header.txt go.pinniped.dev/internal/controller/kubecertagent PodCommandExecutor
|
||||
//go:generate go run -v github.com/golang/mock/mockgen -destination=mockdynamiccert.go -package=mocks -copyright_file=../../../../hack/header.txt -mock_names Private=MockDynamicCertPrivate go.pinniped.dev/internal/dynamiccert Private
|
||||
internal/controller/kubecertagent/mocks/mockdynamiccert.go (new file, 132 lines)
@@ -0,0 +1,132 @@
|
||||
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: go.pinniped.dev/internal/dynamiccert (interfaces: Private)
|
||||
|
||||
// Package mocks is a generated GoMock package.
|
||||
package mocks
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
dynamiccertificates "k8s.io/apiserver/pkg/server/dynamiccertificates"
|
||||
)
|
||||
|
||||
// MockDynamicCertPrivate is a mock of Private interface.
|
||||
type MockDynamicCertPrivate struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockDynamicCertPrivateMockRecorder
|
||||
}
|
||||
|
||||
// MockDynamicCertPrivateMockRecorder is the mock recorder for MockDynamicCertPrivate.
|
||||
type MockDynamicCertPrivateMockRecorder struct {
|
||||
mock *MockDynamicCertPrivate
|
||||
}
|
||||
|
||||
// NewMockDynamicCertPrivate creates a new mock instance.
|
||||
func NewMockDynamicCertPrivate(ctrl *gomock.Controller) *MockDynamicCertPrivate {
|
||||
mock := &MockDynamicCertPrivate{ctrl: ctrl}
|
||||
mock.recorder = &MockDynamicCertPrivateMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockDynamicCertPrivate) EXPECT() *MockDynamicCertPrivateMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// AddListener mocks base method.
|
||||
func (m *MockDynamicCertPrivate) AddListener(arg0 dynamiccertificates.Listener) {
|
||||
m.ctrl.T.Helper()
|
||||
m.ctrl.Call(m, "AddListener", arg0)
|
||||
}
|
||||
|
||||
// AddListener indicates an expected call of AddListener.
|
||||
func (mr *MockDynamicCertPrivateMockRecorder) AddListener(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddListener", reflect.TypeOf((*MockDynamicCertPrivate)(nil).AddListener), arg0)
|
||||
}
|
||||
|
||||
// CurrentCertKeyContent mocks base method.
|
||||
func (m *MockDynamicCertPrivate) CurrentCertKeyContent() ([]byte, []byte) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "CurrentCertKeyContent")
|
||||
ret0, _ := ret[0].([]byte)
|
||||
ret1, _ := ret[1].([]byte)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// CurrentCertKeyContent indicates an expected call of CurrentCertKeyContent.
|
||||
func (mr *MockDynamicCertPrivateMockRecorder) CurrentCertKeyContent() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentCertKeyContent", reflect.TypeOf((*MockDynamicCertPrivate)(nil).CurrentCertKeyContent))
|
||||
}
|
||||
|
||||
// Name mocks base method.
|
||||
func (m *MockDynamicCertPrivate) Name() string {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Name")
|
||||
ret0, _ := ret[0].(string)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Name indicates an expected call of Name.
|
||||
func (mr *MockDynamicCertPrivateMockRecorder) Name() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockDynamicCertPrivate)(nil).Name))
|
||||
}
|
||||
|
||||
// Run mocks base method.
|
||||
func (m *MockDynamicCertPrivate) Run(arg0 int, arg1 <-chan struct{}) {
|
||||
m.ctrl.T.Helper()
|
||||
m.ctrl.Call(m, "Run", arg0, arg1)
|
||||
}
|
||||
|
||||
// Run indicates an expected call of Run.
|
||||
func (mr *MockDynamicCertPrivateMockRecorder) Run(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockDynamicCertPrivate)(nil).Run), arg0, arg1)
|
||||
}
|
||||
|
||||
// RunOnce mocks base method.
|
||||
func (m *MockDynamicCertPrivate) RunOnce() error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "RunOnce")
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// RunOnce indicates an expected call of RunOnce.
|
||||
func (mr *MockDynamicCertPrivateMockRecorder) RunOnce() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunOnce", reflect.TypeOf((*MockDynamicCertPrivate)(nil).RunOnce))
|
||||
}
|
||||
|
||||
// SetCertKeyContent mocks base method.
|
||||
func (m *MockDynamicCertPrivate) SetCertKeyContent(arg0, arg1 []byte) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "SetCertKeyContent", arg0, arg1)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// SetCertKeyContent indicates an expected call of SetCertKeyContent.
|
||||
func (mr *MockDynamicCertPrivateMockRecorder) SetCertKeyContent(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCertKeyContent", reflect.TypeOf((*MockDynamicCertPrivate)(nil).SetCertKeyContent), arg0, arg1)
|
||||
}
|
||||
|
||||
// UnsetCertKeyContent mocks base method.
|
||||
func (m *MockDynamicCertPrivate) UnsetCertKeyContent() {
|
||||
m.ctrl.T.Helper()
|
||||
m.ctrl.Call(m, "UnsetCertKeyContent")
|
||||
}
|
||||
|
||||
// UnsetCertKeyContent indicates an expected call of UnsetCertKeyContent.
|
||||
func (mr *MockDynamicCertPrivateMockRecorder) UnsetCertKeyContent() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnsetCertKeyContent", reflect.TypeOf((*MockDynamicCertPrivate)(nil).UnsetCertKeyContent))
|
||||
}
|
||||
@@ -0,0 +1,58 @@
|
||||
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: go.pinniped.dev/internal/controller/kubecertagent (interfaces: PodCommandExecutor)
|
||||
|
||||
// Package mocks is a generated GoMock package.
|
||||
package mocks
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
)
|
||||
|
||||
// MockPodCommandExecutor is a mock of PodCommandExecutor interface.
|
||||
type MockPodCommandExecutor struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockPodCommandExecutorMockRecorder
|
||||
}
|
||||
|
||||
// MockPodCommandExecutorMockRecorder is the mock recorder for MockPodCommandExecutor.
|
||||
type MockPodCommandExecutorMockRecorder struct {
|
||||
mock *MockPodCommandExecutor
|
||||
}
|
||||
|
||||
// NewMockPodCommandExecutor creates a new mock instance.
|
||||
func NewMockPodCommandExecutor(ctrl *gomock.Controller) *MockPodCommandExecutor {
|
||||
mock := &MockPodCommandExecutor{ctrl: ctrl}
|
||||
mock.recorder = &MockPodCommandExecutorMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockPodCommandExecutor) EXPECT() *MockPodCommandExecutorMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// Exec mocks base method.
|
||||
func (m *MockPodCommandExecutor) Exec(arg0, arg1 string, arg2 ...string) (string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
varargs := []interface{}{arg0, arg1}
|
||||
for _, a := range arg2 {
|
||||
varargs = append(varargs, a)
|
||||
}
|
||||
ret := m.ctrl.Call(m, "Exec", varargs...)
|
||||
ret0, _ := ret[0].(string)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// Exec indicates an expected call of Exec.
|
||||
func (mr *MockPodCommandExecutorMockRecorder) Exec(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
varargs := append([]interface{}{arg0, arg1}, arg2...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockPodCommandExecutor)(nil).Exec), varargs...)
|
||||
}
|
||||
@@ -30,6 +30,7 @@ func NewPodCommandExecutor(kubeConfig *restclient.Config, kubeClient kubernetes.
|
||||
}
|
||||
|
||||
func (s *kubeClientPodCommandExecutor) Exec(podNamespace string, podName string, commandAndArgs ...string) (string, error) {
|
||||
// TODO: see if we can add a timeout or make this cancelable somehow
|
||||
request := s.kubeClient.
|
||||
CoreV1().
|
||||
RESTClient().
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
@@ -269,11 +270,17 @@ func (c *controller) validateIssuer(ctx context.Context, upstream *v1alpha1.OIDC
|
||||
|
||||
discoveredProvider, err = oidc.NewProvider(oidc.ClientContext(ctx, httpClient), upstream.Spec.Issuer)
|
||||
if err != nil {
|
||||
const klogLevelTrace = 6
|
||||
c.log.V(klogLevelTrace).WithValues(
|
||||
"namespace", upstream.Namespace,
|
||||
"name", upstream.Name,
|
||||
"issuer", upstream.Spec.Issuer,
|
||||
).Error(err, "failed to perform OIDC discovery")
|
||||
return &v1alpha1.Condition{
|
||||
Type: typeOIDCDiscoverySucceeded,
|
||||
Status: v1alpha1.ConditionFalse,
|
||||
Reason: reasonUnreachable,
|
||||
Message: fmt.Sprintf("failed to perform OIDC discovery against %q", upstream.Spec.Issuer),
|
||||
Message: fmt.Sprintf("failed to perform OIDC discovery against %q:\n%s", upstream.Spec.Issuer, truncateNonOIDCErr(err)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -419,3 +426,14 @@ func computeScopes(additionalScopes []string) []string {
|
||||
sort.Strings(scopes)
|
||||
return scopes
|
||||
}
|
||||
|
||||
func truncateNonOIDCErr(err error) string {
|
||||
const max = 100
|
||||
msg := err.Error()
|
||||
|
||||
if len(msg) <= max || strings.HasPrefix(msg, "oidc:") {
|
||||
return msg
|
||||
}
|
||||
|
||||
return msg[:max] + fmt.Sprintf(" [truncated %d chars]", len(msg)-max)
|
||||
}
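
For illustration (not part of the diff above), a standalone sketch that reproduces the truncation behavior the updated test expectations below check; the 109-character message is a hypothetical example that yields the "[truncated 9 chars]" suffix:

// Illustration only: the truncation applied to long, non-OIDC discovery errors.
package main

import (
	"errors"
	"fmt"
	"strings"
)

func truncateNonOIDCErr(err error) string {
	const max = 100
	msg := err.Error()
	if len(msg) <= max || strings.HasPrefix(msg, "oidc:") {
		return msg
	}
	return msg[:max] + fmt.Sprintf(" [truncated %d chars]", len(msg)-max)
}

func main() {
	long := errors.New(strings.Repeat("x", 109))
	fmt.Println(truncateNonOIDCErr(long)) // 100 x's followed by " [truncated 9 chars]"

	oidcErr := errors.New("oidc: " + strings.Repeat("x", 200))
	fmt.Println(truncateNonOIDCErr(oidcErr)) // returned unmodified because of the "oidc:" prefix
}
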
|
||||
|
||||
@@ -370,7 +370,7 @@ func TestController(t *testing.T) {
			inputUpstreams: []runtime.Object{&v1alpha1.OIDCIdentityProvider{
				ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName},
				Spec: v1alpha1.OIDCIdentityProviderSpec{
					Issuer:              "invalid-url",
					Issuer:              "invalid-url-that-is-really-really-long",
					Client:              v1alpha1.OIDCClient{SecretName: testSecretName},
					AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{AdditionalScopes: testAdditionalScopes},
				},
@@ -382,9 +382,10 @@ func TestController(t *testing.T) {
			}},
			wantErr: controllerlib.ErrSyntheticRequeue.Error(),
			wantLogs: []string{
				`upstream-observer "msg"="failed to perform OIDC discovery" "error"="Get \"invalid-url-that-is-really-really-long/.well-known/openid-configuration\": unsupported protocol scheme \"\"" "issuer"="invalid-url-that-is-really-really-long" "name"="test-name" "namespace"="test-namespace"`,
				`upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="loaded client credentials" "reason"="Success" "status"="True" "type"="ClientCredentialsValid"`,
				`upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="failed to perform OIDC discovery against \"invalid-url\"" "reason"="Unreachable" "status"="False" "type"="OIDCDiscoverySucceeded"`,
				`upstream-observer "msg"="found failing condition" "error"="OIDCIdentityProvider has a failing condition" "message"="failed to perform OIDC discovery against \"invalid-url\"" "name"="test-name" "namespace"="test-namespace" "reason"="Unreachable" "type"="OIDCDiscoverySucceeded"`,
				`upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="failed to perform OIDC discovery against \"invalid-url-that-is-really-really-long\":\nGet \"invalid-url-that-is-really-really-long/.well-known/openid-configuration\": unsupported protocol [truncated 9 chars]" "reason"="Unreachable" "status"="False" "type"="OIDCDiscoverySucceeded"`,
				`upstream-observer "msg"="found failing condition" "error"="OIDCIdentityProvider has a failing condition" "message"="failed to perform OIDC discovery against \"invalid-url-that-is-really-really-long\":\nGet \"invalid-url-that-is-really-really-long/.well-known/openid-configuration\": unsupported protocol [truncated 9 chars]" "name"="test-name" "namespace"="test-namespace" "reason"="Unreachable" "type"="OIDCDiscoverySucceeded"`,
			},
			wantResultingCache: []provider.UpstreamOIDCIdentityProviderI{},
			wantResultingUpstreams: []v1alpha1.OIDCIdentityProvider{{
@@ -404,7 +405,8 @@ func TestController(t *testing.T) {
						Status:             "False",
						LastTransitionTime: now,
						Reason:             "Unreachable",
						Message:            `failed to perform OIDC discovery against "invalid-url"`,
						Message: `failed to perform OIDC discovery against "invalid-url-that-is-really-really-long":
Get "invalid-url-that-is-really-really-long/.well-known/openid-configuration": unsupported protocol [truncated 9 chars]`,
					},
				},
			},
@@ -600,6 +602,151 @@ func TestController(t *testing.T) {
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "existing valid upstream with trailing slash",
|
||||
inputUpstreams: []runtime.Object{&v1alpha1.OIDCIdentityProvider{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName, Generation: 1234},
|
||||
Spec: v1alpha1.OIDCIdentityProviderSpec{
|
||||
Issuer: testIssuerURL + "/ends-with-slash/",
|
||||
TLS: &v1alpha1.TLSSpec{CertificateAuthorityData: testIssuerCABase64},
|
||||
Client: v1alpha1.OIDCClient{SecretName: testSecretName},
|
||||
AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{AdditionalScopes: testAdditionalScopes},
|
||||
Claims: v1alpha1.OIDCClaims{Groups: testGroupsClaim, Username: testUsernameClaim},
|
||||
},
|
||||
Status: v1alpha1.OIDCIdentityProviderStatus{
|
||||
Phase: "Ready",
|
||||
Conditions: []v1alpha1.Condition{
|
||||
{Type: "ClientCredentialsValid", Status: "True", LastTransitionTime: earlier, Reason: "Success", Message: "loaded client credentials"},
|
||||
{Type: "OIDCDiscoverySucceeded", Status: "True", LastTransitionTime: earlier, Reason: "Success", Message: "discovered issuer configuration"},
|
||||
},
|
||||
},
|
||||
}},
|
||||
inputSecrets: []runtime.Object{&corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testSecretName},
|
||||
Type: "secrets.pinniped.dev/oidc-client",
|
||||
Data: testValidSecretData,
|
||||
}},
|
||||
wantLogs: []string{
|
||||
`upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="loaded client credentials" "reason"="Success" "status"="True" "type"="ClientCredentialsValid"`,
|
||||
`upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="discovered issuer configuration" "reason"="Success" "status"="True" "type"="OIDCDiscoverySucceeded"`,
|
||||
},
|
||||
wantResultingCache: []provider.UpstreamOIDCIdentityProviderI{
|
||||
&oidctestutil.TestUpstreamOIDCIdentityProvider{
|
||||
Name: testName,
|
||||
ClientID: testClientID,
|
||||
AuthorizationURL: *testIssuerAuthorizeURL,
|
||||
Scopes: testExpectedScopes,
|
||||
UsernameClaim: testUsernameClaim,
|
||||
GroupsClaim: testGroupsClaim,
|
||||
},
|
||||
},
|
||||
wantResultingUpstreams: []v1alpha1.OIDCIdentityProvider{{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName, Generation: 1234},
|
||||
Status: v1alpha1.OIDCIdentityProviderStatus{
|
||||
Phase: "Ready",
|
||||
Conditions: []v1alpha1.Condition{
|
||||
{Type: "ClientCredentialsValid", Status: "True", LastTransitionTime: earlier, Reason: "Success", Message: "loaded client credentials", ObservedGeneration: 1234},
|
||||
{Type: "OIDCDiscoverySucceeded", Status: "True", LastTransitionTime: earlier, Reason: "Success", Message: "discovered issuer configuration", ObservedGeneration: 1234},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "issuer is invalid URL, missing trailing slash",
|
||||
inputUpstreams: []runtime.Object{&v1alpha1.OIDCIdentityProvider{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName},
|
||||
Spec: v1alpha1.OIDCIdentityProviderSpec{
|
||||
Issuer: testIssuerURL + "/ends-with-slash",
|
||||
TLS: &v1alpha1.TLSSpec{CertificateAuthorityData: testIssuerCABase64},
|
||||
Client: v1alpha1.OIDCClient{SecretName: testSecretName},
|
||||
AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{AdditionalScopes: testAdditionalScopes},
|
||||
},
|
||||
}},
|
||||
inputSecrets: []runtime.Object{&corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testSecretName},
|
||||
Type: "secrets.pinniped.dev/oidc-client",
|
||||
Data: testValidSecretData,
|
||||
}},
|
||||
wantErr: controllerlib.ErrSyntheticRequeue.Error(),
|
||||
wantLogs: []string{
|
||||
`upstream-observer "msg"="failed to perform OIDC discovery" "error"="oidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/ends-with-slash\" got \"` + testIssuerURL + `/ends-with-slash/\"" "issuer"="` + testIssuerURL + `/ends-with-slash" "name"="test-name" "namespace"="test-namespace"`,
|
||||
`upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="loaded client credentials" "reason"="Success" "status"="True" "type"="ClientCredentialsValid"`,
|
||||
`upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="failed to perform OIDC discovery against \"` + testIssuerURL + `/ends-with-slash\":\noidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/ends-with-slash\" got \"` + testIssuerURL + `/ends-with-slash/\"" "reason"="Unreachable" "status"="False" "type"="OIDCDiscoverySucceeded"`,
|
||||
`upstream-observer "msg"="found failing condition" "error"="OIDCIdentityProvider has a failing condition" "message"="failed to perform OIDC discovery against \"` + testIssuerURL + `/ends-with-slash\":\noidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/ends-with-slash\" got \"` + testIssuerURL + `/ends-with-slash/\"" "name"="test-name" "namespace"="test-namespace" "reason"="Unreachable" "type"="OIDCDiscoverySucceeded"`,
|
||||
},
|
||||
wantResultingCache: []provider.UpstreamOIDCIdentityProviderI{},
|
||||
wantResultingUpstreams: []v1alpha1.OIDCIdentityProvider{{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName},
|
||||
Status: v1alpha1.OIDCIdentityProviderStatus{
|
||||
Phase: "Error",
|
||||
Conditions: []v1alpha1.Condition{
|
||||
{
|
||||
Type: "ClientCredentialsValid",
|
||||
Status: "True",
|
||||
LastTransitionTime: now,
|
||||
Reason: "Success",
|
||||
Message: "loaded client credentials",
|
||||
},
|
||||
{
|
||||
Type: "OIDCDiscoverySucceeded",
|
||||
Status: "False",
|
||||
LastTransitionTime: now,
|
||||
Reason: "Unreachable",
|
||||
Message: `failed to perform OIDC discovery against "` + testIssuerURL + `/ends-with-slash":
|
||||
oidc: issuer did not match the issuer returned by provider, expected "` + testIssuerURL + `/ends-with-slash" got "` + testIssuerURL + `/ends-with-slash/"`,
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "issuer is invalid URL, extra trailing slash",
|
||||
inputUpstreams: []runtime.Object{&v1alpha1.OIDCIdentityProvider{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName},
|
||||
Spec: v1alpha1.OIDCIdentityProviderSpec{
|
||||
Issuer: testIssuerURL + "/",
|
||||
TLS: &v1alpha1.TLSSpec{CertificateAuthorityData: testIssuerCABase64},
|
||||
Client: v1alpha1.OIDCClient{SecretName: testSecretName},
|
||||
AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{AdditionalScopes: testAdditionalScopes},
|
||||
},
|
||||
}},
|
||||
inputSecrets: []runtime.Object{&corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testSecretName},
|
||||
Type: "secrets.pinniped.dev/oidc-client",
|
||||
Data: testValidSecretData,
|
||||
}},
|
||||
wantErr: controllerlib.ErrSyntheticRequeue.Error(),
|
||||
wantLogs: []string{
|
||||
`upstream-observer "msg"="failed to perform OIDC discovery" "error"="oidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/\" got \"` + testIssuerURL + `\"" "issuer"="` + testIssuerURL + `/" "name"="test-name" "namespace"="test-namespace"`,
|
||||
`upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="loaded client credentials" "reason"="Success" "status"="True" "type"="ClientCredentialsValid"`,
|
||||
`upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="failed to perform OIDC discovery against \"` + testIssuerURL + `/\":\noidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/\" got \"` + testIssuerURL + `\"" "reason"="Unreachable" "status"="False" "type"="OIDCDiscoverySucceeded"`,
|
||||
`upstream-observer "msg"="found failing condition" "error"="OIDCIdentityProvider has a failing condition" "message"="failed to perform OIDC discovery against \"` + testIssuerURL + `/\":\noidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/\" got \"` + testIssuerURL + `\"" "name"="test-name" "namespace"="test-namespace" "reason"="Unreachable" "type"="OIDCDiscoverySucceeded"`,
|
||||
},
|
||||
wantResultingCache: []provider.UpstreamOIDCIdentityProviderI{},
|
||||
wantResultingUpstreams: []v1alpha1.OIDCIdentityProvider{{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName},
|
||||
Status: v1alpha1.OIDCIdentityProviderStatus{
|
||||
Phase: "Error",
|
||||
Conditions: []v1alpha1.Condition{
|
||||
{
|
||||
Type: "ClientCredentialsValid",
|
||||
Status: "True",
|
||||
LastTransitionTime: now,
|
||||
Reason: "Success",
|
||||
Message: "loaded client credentials",
|
||||
},
|
||||
{
|
||||
Type: "OIDCDiscoverySucceeded",
|
||||
Status: "False",
|
||||
LastTransitionTime: now,
|
||||
Reason: "Unreachable",
|
||||
Message: `failed to perform OIDC discovery against "` + testIssuerURL + `/":
|
||||
oidc: issuer did not match the issuer returned by provider, expected "` + testIssuerURL + `/" got "` + testIssuerURL + `"`,
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
@@ -728,5 +875,25 @@ func newTestIssuer(t *testing.T) (string, string) {
|
||||
})
|
||||
})
|
||||
|
||||
// handle the four issuer with trailing slash configs
|
||||
|
||||
// valid case in= out=
|
||||
// handled above at the root of testURL
|
||||
|
||||
// valid case in=/ out=/
|
||||
mux.HandleFunc("/ends-with-slash/.well-known/openid-configuration", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("content-type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(&providerJSON{
|
||||
Issuer: testURL + "/ends-with-slash/",
|
||||
AuthURL: "https://example.com/authorize",
|
||||
})
|
||||
})
|
||||
|
||||
// invalid case in= out=/
|
||||
// can be tested using /ends-with-slash/ endpoint
|
||||
|
||||
// invalid case in=/ out=
|
||||
// can be tested using root endpoint
|
||||
|
||||
return caBundlePEM, testURL
|
||||
}
|
||||
|
||||
@@ -59,7 +59,7 @@ func GarbageCollectorController(
				return isSecretWithGCAnnotation(oldObj) || isSecretWithGCAnnotation(newObj)
			},
			DeleteFunc: func(obj metav1.Object) bool { return false }, // ignore all deletes
			ParentFunc: nil,
			ParentFunc: pinnipedcontroller.SingletonQueue(),
		},
		controllerlib.InformerOption{},
	),
@@ -67,16 +67,20 @@ func GarbageCollectorController(
}

func (c *garbageCollectorController) Sync(ctx controllerlib.Context) error {
	// make sure we have a consistent, static meaning for the current time during the sync loop
	frozenClock := clock.NewFakeClock(c.clock.Now())

	// The Sync method is triggered upon any change to any Secret, which would make this
	// controller too chatty, so it rate limits itself to a more reasonable interval.
	// Note that even during a period when no secrets are changing, it will still run
	// at the informer's full-resync interval (as long as there are some secrets).
	if c.clock.Now().Sub(c.timeOfMostRecentSweep) < minimumRepeatInterval {
	if since := frozenClock.Since(c.timeOfMostRecentSweep); since < minimumRepeatInterval {
		ctx.Queue.AddAfter(ctx.Key, minimumRepeatInterval-since)
		return nil
	}

	plog.Info("starting storage garbage collection sweep")
	c.timeOfMostRecentSweep = c.clock.Now()
	c.timeOfMostRecentSweep = frozenClock.Now()

	listOfSecrets, err := c.secretInformer.Lister().List(labels.Everything())
	if err != nil {
@@ -97,7 +101,7 @@ func (c *garbageCollectorController) Sync(ctx controllerlib.Context) error {
			continue
		}

		if garbageCollectAfterTime.Before(c.clock.Now()) {
		if garbageCollectAfterTime.Before(frozenClock.Now()) {
			err = c.kubeClient.CoreV1().Secrets(secret.Namespace).Delete(ctx.Context, secret.Name, metav1.DeleteOptions{})
			if err != nil {
				plog.WarningErr("failed to garbage collect resource", err, logKV(secret))

@@ -66,6 +66,10 @@ func TestGarbageCollectorControllerInformerFilters(t *testing.T) {
			r.True(subject.Update(secretWithAnnotation, otherSecret))
			r.True(subject.Update(otherSecret, secretWithAnnotation))
		})

		it("returns the same singleton key", func() {
			r.Equal(controllerlib.Key{}, subject.Parent(secretWithAnnotation))
		})
	})

	when("any Secret with the required annotation is deleted", func() {
@@ -136,9 +140,10 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
			Context: cancelContext,
			Name:    subject.Name(),
			Key: controllerlib.Key{
				Namespace: "",
				Name:      "",
				Namespace: "foo",
				Name:      "bar",
			},
			Queue: &testQueue{t: t},
		}

		// Must start informers before calling TestRunSynchronously()
@@ -262,16 +267,23 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
			// Run sync once with the current time set to frozenTime.
			r.NoError(controllerlib.TestSync(t, subject, *syncContext))
			require.Empty(t, kubeClient.Actions())
			r.False(syncContext.Queue.(*testQueue).called)

			// Run sync again when not enough time has passed since the most recent run, so no delete
			// operations should happen even though there is an expired secret now.
			fakeClock.Step(29 * time.Second)
			r.NoError(controllerlib.TestSync(t, subject, *syncContext))
			require.Empty(t, kubeClient.Actions())
			r.True(syncContext.Queue.(*testQueue).called)
			r.Equal(controllerlib.Key{Namespace: "foo", Name: "bar"}, syncContext.Queue.(*testQueue).key) // assert key is passed through
			r.Equal(time.Second, syncContext.Queue.(*testQueue).duration)                                 // assert that we get the exact requeue time

			syncContext.Queue = &testQueue{t: t} // reset the queue for the next sync

			// Step to the exact threshold and run Sync again. Now we are past the rate limiting period.
			fakeClock.Step(1*time.Second + 1*time.Millisecond)
			fakeClock.Step(time.Second)
			r.NoError(controllerlib.TestSync(t, subject, *syncContext))
			r.False(syncContext.Queue.(*testQueue).called)

			// It should have deleted the expired secret.
			r.ElementsMatch(
@@ -381,3 +393,23 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
		})
	}, spec.Parallel(), spec.Report(report.Terminal{}))
}

type testQueue struct {
	t *testing.T

	called   bool
	key      controllerlib.Key
	duration time.Duration

	controllerlib.Queue // panic if any other methods called
}

func (q *testQueue) AddAfter(key controllerlib.Key, duration time.Duration) {
	q.t.Helper()

	require.False(q.t, q.called, "AddAfter should only be called once")

	q.called = true
	q.key = key
	q.duration = duration
}

@@ -119,16 +119,15 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
	// Create informers. Don't forget to make sure they get started in the function returned below.
	informers := createInformers(c.ServerInstallationInfo.Namespace, client.Kubernetes, client.PinnipedConcierge)

	// Configuration for the kubecertagent controllers created below.
	agentPodConfig := &kubecertagent.AgentPodConfig{
	agentConfig := kubecertagent.AgentConfig{
		Namespace:                 c.ServerInstallationInfo.Namespace,
		ServiceAccountName:        c.NamesConfig.AgentServiceAccount,
		ContainerImage:            *c.KubeCertAgentConfig.Image,
		PodNamePrefix:             *c.KubeCertAgentConfig.NamePrefix,
		NamePrefix:                *c.KubeCertAgentConfig.NamePrefix,
		ContainerImagePullSecrets: c.KubeCertAgentConfig.ImagePullSecrets,
		AdditionalLabels:          c.Labels,
	}
	credentialIssuerLocationConfig := &kubecertagent.CredentialIssuerLocationConfig{
		Name:                 c.NamesConfig.CredentialIssuer,
		Labels:               c.Labels,
		CredentialIssuerName: c.NamesConfig.CredentialIssuer,
		DiscoveryURLOverride: c.DiscoveryURLOverride,
	}

	// Create controller manager.
@@ -195,64 +194,31 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
			),
			singletonWorker,
		).

		// Kube cert agent controllers are responsible for finding the cluster's signing keys and keeping them
		// The kube-cert-agent controller is responsible for finding the cluster's signing keys and keeping them
		// up to date in memory, as well as reporting status on this cluster integration strategy.
		WithController(
			kubecertagent.NewCreaterController(
				agentPodConfig,
				credentialIssuerLocationConfig,
				c.Labels,
				clock.RealClock{},
				client.Kubernetes,
				client.PinnipedConcierge,
			kubecertagent.NewAgentController(
				agentConfig,
				client,
				informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
				informers.installationNamespaceK8s.Core().V1().Pods(),
				controllerlib.WithInformer,
				controllerlib.WithInitialEvent,
			),
			singletonWorker,
		).
		WithController(
			kubecertagent.NewAnnotaterController(
				agentPodConfig,
				credentialIssuerLocationConfig,
				c.Labels,
				clock.RealClock{},
				client.Kubernetes,
				client.PinnipedConcierge,
				informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
				informers.installationNamespaceK8s.Core().V1().Pods(),
				controllerlib.WithInformer,
			),
			singletonWorker,
		).
		WithController(
			kubecertagent.NewExecerController(
				credentialIssuerLocationConfig,
				c.Labels,
				c.DiscoveryURLOverride,
				c.DynamicSigningCertProvider,
				kubecertagent.NewPodCommandExecutor(client.JSONConfig, client.Kubernetes),
				client.PinnipedConcierge,
				clock.RealClock{},
				informers.installationNamespaceK8s.Apps().V1().Deployments(),
				informers.installationNamespaceK8s.Core().V1().Pods(),
				informers.kubePublicNamespaceK8s.Core().V1().ConfigMaps(),
				controllerlib.WithInformer,
				c.DynamicSigningCertProvider,
			),
			singletonWorker,
		).
		// The kube-cert-agent legacy pod cleaner controller is responsible for cleaning up pods that were deployed by
		// versions of Pinniped prior to v0.7.0. If we stop supporting upgrades from v0.7.0, we can safely remove this.
		WithController(
			kubecertagent.NewDeleterController(
				agentPodConfig,
				client.Kubernetes,
				informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
			kubecertagent.NewLegacyPodCleanerController(
				agentConfig,
				client,
				informers.installationNamespaceK8s.Core().V1().Pods(),
				controllerlib.WithInformer,
				klogr.New(),
			),
			singletonWorker,
		).

		// The cache filler/cleaner controllers are responsible for keeping an in-memory representation of active
		// authenticators up to date.
		WithController(

127
internal/execcredcache/cachefile.go
Normal file
@@ -0,0 +1,127 @@
|
||||
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package execcredcache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
var (
|
||||
// errUnsupportedVersion is returned (internally) when we encounter a version of the cache file that we
|
||||
// don't understand how to handle (such as one produced by a future version of Pinniped).
|
||||
errUnsupportedVersion = fmt.Errorf("unsupported credential cache version")
|
||||
)
|
||||
|
||||
const (
|
||||
// apiVersion is the Kubernetes-style API version of the credential cache file object.
|
||||
apiVersion = "config.supervisor.pinniped.dev/v1alpha1"
|
||||
|
||||
// apiKind is the Kubernetes-style Kind of the credential cache file object.
|
||||
apiKind = "CredentialCache"
|
||||
|
||||
// maxCacheDuration is how long a credential can remain in the cache even if it's still otherwise valid.
|
||||
maxCacheDuration = 1 * time.Hour
|
||||
)
|
||||
|
||||
type (
|
||||
// credCache is the object which is YAML-serialized to form the contents of the cache file.
|
||||
credCache struct {
|
||||
metav1.TypeMeta
|
||||
Entries []entry `json:"credentials"`
|
||||
}
|
||||
|
||||
// entry is a single credential in the cache file.
|
||||
entry struct {
|
||||
Key string `json:"key"`
|
||||
CreationTimestamp metav1.Time `json:"creationTimestamp"`
|
||||
LastUsedTimestamp metav1.Time `json:"lastUsedTimestamp"`
|
||||
Credential *clientauthenticationv1beta1.ExecCredentialStatus `json:"credential"`
|
||||
}
|
||||
)
|
||||
|
||||
// readCache loads a credCache from a path on disk. If the requested path does not exist, it returns an empty cache.
|
||||
func readCache(path string) (*credCache, error) {
|
||||
cacheYAML, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
// If the file was not found, generate a freshly initialized empty cache.
|
||||
return emptyCache(), nil
|
||||
}
|
||||
// Otherwise bubble up the error.
|
||||
return nil, fmt.Errorf("could not read cache file: %w", err)
|
||||
}
|
||||
|
||||
// If we read the file successfully, unmarshal it from YAML.
|
||||
var cache credCache
|
||||
if err := yaml.Unmarshal(cacheYAML, &cache); err != nil {
|
||||
return nil, fmt.Errorf("invalid cache file: %w", err)
|
||||
}
|
||||
|
||||
// Validate that we're reading a version of the config we understand how to parse.
|
||||
if !(cache.TypeMeta.APIVersion == apiVersion && cache.TypeMeta.Kind == apiKind) {
|
||||
return nil, fmt.Errorf("%w: %#v", errUnsupportedVersion, cache.TypeMeta)
|
||||
}
|
||||
return &cache, nil
|
||||
}
|
||||
|
||||
// emptyCache returns an empty, initialized credCache.
|
||||
func emptyCache() *credCache {
|
||||
return &credCache{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: apiVersion, Kind: apiKind},
|
||||
Entries: make([]entry, 0, 1),
|
||||
}
|
||||
}
|
||||
|
||||
// writeTo writes the cache to the specified file path.
|
||||
func (c *credCache) writeTo(path string) error {
|
||||
// Marshal the cache back to YAML and save it to the file.
|
||||
cacheYAML, err := yaml.Marshal(c)
|
||||
if err == nil {
|
||||
err = ioutil.WriteFile(path, cacheYAML, 0600)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// normalized returns a copy of the credCache with stale entries removed and entries sorted in a canonical order.
|
||||
func (c *credCache) normalized() *credCache {
|
||||
result := emptyCache()
|
||||
|
||||
// Clean up expired/invalid tokens.
|
||||
now := time.Now()
|
||||
result.Entries = make([]entry, 0, len(c.Entries))
|
||||
|
||||
for _, e := range c.Entries {
|
||||
// Eliminate any cache entries that are missing a credential or an expiration timestamp.
|
||||
if e.Credential == nil || e.Credential.ExpirationTimestamp == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Eliminate any expired credentials.
|
||||
if e.Credential.ExpirationTimestamp.Time.Before(time.Now()) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Eliminate any entries older than maxCacheDuration.
|
||||
if e.CreationTimestamp.Time.Before(now.Add(-maxCacheDuration)) {
|
||||
continue
|
||||
}
|
||||
result.Entries = append(result.Entries, e)
|
||||
}
|
||||
|
||||
// Sort the entries by creation time.
|
||||
sort.SliceStable(result.Entries, func(i, j int) bool {
|
||||
return result.Entries[i].CreationTimestamp.Before(&result.Entries[j].CreationTimestamp)
|
||||
})
|
||||
|
||||
return result
|
||||
}
|
||||
207
internal/execcredcache/cachefile_test.go
Normal file
@@ -0,0 +1,207 @@
|
||||
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package execcredcache
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
|
||||
"go.pinniped.dev/internal/testutil"
|
||||
)
|
||||
|
||||
var (
|
||||
// validCache should be the same data as `testdata/valid.yaml`.
|
||||
validCache = credCache{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "config.supervisor.pinniped.dev/v1alpha1", Kind: "CredentialCache"},
|
||||
Entries: []entry{
|
||||
{
|
||||
Key: "test-key",
|
||||
CreationTimestamp: metav1.NewTime(time.Date(2020, 10, 20, 18, 42, 7, 0, time.UTC).Local()),
|
||||
LastUsedTimestamp: metav1.NewTime(time.Date(2020, 10, 20, 18, 45, 31, 0, time.UTC).Local()),
|
||||
Credential: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
Token: "test-token",
|
||||
ExpirationTimestamp: &expTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expTime = metav1.NewTime(time.Date(2020, 10, 20, 19, 46, 30, 0, time.UTC).Local())
|
||||
)
|
||||
|
||||
func TestReadCache(t *testing.T) {
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
path string
|
||||
want *credCache
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "does not exist",
|
||||
path: "./testdata/does-not-exist.yaml",
|
||||
want: &credCache{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "config.supervisor.pinniped.dev/v1alpha1", Kind: "CredentialCache"},
|
||||
Entries: []entry{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "other file error",
|
||||
path: "./testdata/",
|
||||
wantErr: "could not read cache file: read ./testdata/: is a directory",
|
||||
},
|
||||
{
|
||||
name: "invalid YAML",
|
||||
path: "./testdata/invalid.yaml",
|
||||
wantErr: "invalid cache file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type execcredcache.credCache",
|
||||
},
|
||||
{
|
||||
name: "wrong version",
|
||||
path: "./testdata/wrong-version.yaml",
|
||||
wantErr: `unsupported credential cache version: v1.TypeMeta{Kind:"NotACredentialCache", APIVersion:"config.supervisor.pinniped.dev/v2alpha6"}`,
|
||||
},
|
||||
{
|
||||
name: "valid",
|
||||
path: "./testdata/valid.yaml",
|
||||
want: &validCache,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got, err := readCache(tt.path)
|
||||
if tt.wantErr != "" {
|
||||
require.EqualError(t, err, tt.wantErr)
|
||||
require.Nil(t, got)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, got)
|
||||
require.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyCache(t *testing.T) {
|
||||
t.Parallel()
|
||||
got := emptyCache()
|
||||
require.Equal(t, metav1.TypeMeta{APIVersion: "config.supervisor.pinniped.dev/v1alpha1", Kind: "CredentialCache"}, got.TypeMeta)
|
||||
require.Equal(t, 0, len(got.Entries))
|
||||
require.Equal(t, 1, cap(got.Entries))
|
||||
}
|
||||
|
||||
func TestWriteTo(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("io error", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
tmp := testutil.TempDir(t) + "/credentials.yaml"
|
||||
require.NoError(t, os.Mkdir(tmp, 0700))
|
||||
err := validCache.writeTo(tmp)
|
||||
require.EqualError(t, err, "open "+tmp+": is a directory")
|
||||
})
|
||||
|
||||
t.Run("success", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
require.NoError(t, validCache.writeTo(testutil.TempDir(t)+"/credentials.yaml"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestNormalized(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
require.Equal(t, emptyCache(), emptyCache().normalized())
|
||||
})
|
||||
|
||||
t.Run("nonempty", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := emptyCache()
|
||||
now := time.Now()
|
||||
oneMinuteAgo := metav1.NewTime(now.Add(-1 * time.Minute))
|
||||
oneHourFromNow := metav1.NewTime(now.Add(1 * time.Hour))
|
||||
input.Entries = []entry{
|
||||
// Credential is nil.
|
||||
{
|
||||
Key: "nil-credential-key",
|
||||
LastUsedTimestamp: metav1.NewTime(now),
|
||||
Credential: nil,
|
||||
},
|
||||
// Credential's expiration is nil.
|
||||
{
|
||||
Key: "nil-expiration-key",
|
||||
LastUsedTimestamp: metav1.NewTime(now),
|
||||
Credential: &clientauthenticationv1beta1.ExecCredentialStatus{},
|
||||
},
|
||||
// Credential is expired.
|
||||
{
|
||||
Key: "expired-key",
|
||||
LastUsedTimestamp: metav1.NewTime(now),
|
||||
Credential: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: &oneMinuteAgo,
|
||||
Token: "expired-token",
|
||||
},
|
||||
},
|
||||
// Credential is still valid but is older than maxCacheDuration.
|
||||
{
|
||||
Key: "too-old-key",
|
||||
LastUsedTimestamp: metav1.NewTime(now),
|
||||
CreationTimestamp: metav1.NewTime(now.Add(-3 * time.Hour)),
|
||||
Credential: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: &oneHourFromNow,
|
||||
Token: "too-old-token",
|
||||
},
|
||||
},
|
||||
// Two entries that are still valid but are out of order.
|
||||
{
|
||||
Key: "key-two",
|
||||
CreationTimestamp: metav1.NewTime(now.Add(-1 * time.Minute)),
|
||||
LastUsedTimestamp: metav1.NewTime(now),
|
||||
Credential: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: &oneHourFromNow,
|
||||
Token: "token-two",
|
||||
},
|
||||
},
|
||||
{
|
||||
Key: "key-one",
|
||||
CreationTimestamp: metav1.NewTime(now.Add(-2 * time.Minute)),
|
||||
LastUsedTimestamp: metav1.NewTime(now),
|
||||
Credential: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: &oneHourFromNow,
|
||||
Token: "token-one",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Expect that all but the last two valid entries are pruned, and that they're sorted.
|
||||
require.Equal(t, &credCache{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "config.supervisor.pinniped.dev/v1alpha1", Kind: "CredentialCache"},
|
||||
Entries: []entry{
|
||||
{
|
||||
Key: "key-one",
|
||||
CreationTimestamp: metav1.NewTime(now.Add(-2 * time.Minute)),
|
||||
LastUsedTimestamp: metav1.NewTime(now),
|
||||
Credential: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: &oneHourFromNow,
|
||||
Token: "token-one",
|
||||
},
|
||||
},
|
||||
{
|
||||
Key: "key-two",
|
||||
CreationTimestamp: metav1.NewTime(now.Add(-1 * time.Minute)),
|
||||
LastUsedTimestamp: metav1.NewTime(now),
|
||||
Credential: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: &oneHourFromNow,
|
||||
Token: "token-two",
|
||||
},
|
||||
},
|
||||
},
|
||||
}, input.normalized())
|
||||
})
|
||||
}
|
||||
159
internal/execcredcache/execcredcache.go
Normal file
@@ -0,0 +1,159 @@
|
||||
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package execcredcache implements a cache for Kubernetes ExecCredential data.
|
||||
package execcredcache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/gofrs/flock"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultFileLockTimeout is how long we will wait trying to acquire the file lock on the cache file before timing out.
|
||||
defaultFileLockTimeout = 10 * time.Second
|
||||
|
||||
// defaultFileLockRetryInterval is how often we will poll while waiting for the file lock to become available.
|
||||
defaultFileLockRetryInterval = 10 * time.Millisecond
|
||||
)
|
||||
|
||||
type Cache struct {
|
||||
path string
|
||||
errReporter func(error)
|
||||
trylockFunc func() error
|
||||
unlockFunc func() error
|
||||
}
|
||||
|
||||
func New(path string) *Cache {
|
||||
lock := flock.New(path + ".lock")
|
||||
return &Cache{
|
||||
path: path,
|
||||
trylockFunc: func() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultFileLockTimeout)
|
||||
defer cancel()
|
||||
_, err := lock.TryLockContext(ctx, defaultFileLockRetryInterval)
|
||||
return err
|
||||
},
|
||||
unlockFunc: lock.Unlock,
|
||||
errReporter: func(_ error) {},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) Get(key interface{}) *clientauthenticationv1beta1.ExecCredential {
|
||||
// If the cache file does not exist, exit immediately with no error log
|
||||
if _, err := os.Stat(c.path); errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read the cache and lookup the matching entry. If one exists, update its last used timestamp and return it.
|
||||
var result *clientauthenticationv1beta1.ExecCredential
|
||||
cacheKey := jsonSHA256Hex(key)
|
||||
c.withCache(func(cache *credCache) {
|
||||
// Find the existing entry, if one exists
|
||||
for i := range cache.Entries {
|
||||
if cache.Entries[i].Key == cacheKey {
|
||||
result = &clientauthenticationv1beta1.ExecCredential{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ExecCredential",
|
||||
APIVersion: "client.authentication.k8s.io/v1beta1",
|
||||
},
|
||||
Status: cache.Entries[i].Credential,
|
||||
}
|
||||
|
||||
// Update the last-used timestamp.
|
||||
cache.Entries[i].LastUsedTimestamp = metav1.Now()
|
||||
break
|
||||
}
|
||||
}
|
||||
})
|
||||
return result
|
||||
}
|
||||
|
||||
func (c *Cache) Put(key interface{}, cred *clientauthenticationv1beta1.ExecCredential) {
|
||||
// Create the cache directory if it does not exist.
|
||||
if err := os.MkdirAll(filepath.Dir(c.path), 0700); err != nil && !errors.Is(err, os.ErrExist) {
|
||||
c.errReporter(fmt.Errorf("could not create credential cache directory: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
// Mutate the cache to upsert the new entry.
|
||||
cacheKey := jsonSHA256Hex(key)
|
||||
c.withCache(func(cache *credCache) {
|
||||
// Find the existing entry, if one exists
|
||||
for i := range cache.Entries {
|
||||
if cache.Entries[i].Key == cacheKey {
|
||||
// Update the stored entry and return.
|
||||
cache.Entries[i].Credential = cred.Status
|
||||
cache.Entries[i].LastUsedTimestamp = metav1.Now()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If there's not an entry for this key, insert one.
|
||||
now := metav1.Now()
|
||||
cache.Entries = append(cache.Entries, entry{
|
||||
Key: cacheKey,
|
||||
CreationTimestamp: now,
|
||||
LastUsedTimestamp: now,
|
||||
Credential: cred.Status,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func jsonSHA256Hex(key interface{}) string {
|
||||
hash := sha256.New()
|
||||
if err := json.NewEncoder(hash).Encode(key); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return hex.EncodeToString(hash.Sum(nil))
|
||||
}
|
||||
|
||||
// withCache is an internal helper which locks, reads the cache, processes/mutates it with the provided function, then
|
||||
// saves it back to the file.
|
||||
func (c *Cache) withCache(transact func(*credCache)) {
|
||||
// Grab the file lock so we have exclusive access to read the file.
|
||||
if err := c.trylockFunc(); err != nil {
|
||||
c.errReporter(fmt.Errorf("could not lock cache file: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
// Unlock the file at the end of this call, bubbling up the error if things were otherwise successful.
|
||||
defer func() {
|
||||
if err := c.unlockFunc(); err != nil {
|
||||
c.errReporter(fmt.Errorf("could not unlock cache file: %w", err))
|
||||
}
|
||||
}()
|
||||
|
||||
// Try to read the existing cache.
|
||||
cache, err := readCache(c.path)
|
||||
if err != nil {
|
||||
// If that fails, fall back to resetting to a blank slate.
|
||||
c.errReporter(fmt.Errorf("failed to read cache, resetting: %w", err))
|
||||
cache = emptyCache()
|
||||
}
|
||||
|
||||
// Normalize the cache before modifying it, to remove any entries that have already expired.
|
||||
cache = cache.normalized()
|
||||
|
||||
// Process/mutate the cache using the provided function.
|
||||
transact(cache)
|
||||
|
||||
// Normalize again to put everything into a known order.
|
||||
cache = cache.normalized()
|
||||
|
||||
// Marshal the cache back to YAML and save it to the file.
|
||||
if err := cache.writeTo(c.path); err != nil {
|
||||
c.errReporter(fmt.Errorf("could not write cache: %w", err))
|
||||
}
|
||||
}
|
||||
389
internal/execcredcache/execcredcache_test.go
Normal file
@@ -0,0 +1,389 @@
|
||||
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package execcredcache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
|
||||
"go.pinniped.dev/internal/testutil"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
t.Parallel()
|
||||
tmp := testutil.TempDir(t) + "/credentials.yaml"
|
||||
c := New(tmp)
|
||||
require.NotNil(t, c)
|
||||
require.Equal(t, tmp, c.path)
|
||||
require.NotNil(t, c.errReporter)
|
||||
c.errReporter(fmt.Errorf("some error"))
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
t.Parallel()
|
||||
now := time.Now().Round(1 * time.Second)
|
||||
oneHourFromNow := metav1.NewTime(now.Add(1 * time.Hour))
|
||||
|
||||
type testKey struct{ K1, K2 string }
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
makeTestFile func(t *testing.T, tmp string)
|
||||
trylockFunc func(*testing.T) error
|
||||
unlockFunc func(*testing.T) error
|
||||
key testKey
|
||||
want *clientauthenticationv1beta1.ExecCredential
|
||||
wantErrors []string
|
||||
wantTestFile func(t *testing.T, tmp string)
|
||||
}{
|
||||
{
|
||||
name: "not found",
|
||||
key: testKey{},
|
||||
},
|
||||
{
|
||||
name: "file lock error",
|
||||
makeTestFile: func(t *testing.T, tmp string) { require.NoError(t, ioutil.WriteFile(tmp, []byte(""), 0600)) },
|
||||
trylockFunc: func(t *testing.T) error { return fmt.Errorf("some lock error") },
|
||||
unlockFunc: func(t *testing.T) error { require.Fail(t, "should not be called"); return nil },
|
||||
key: testKey{},
|
||||
wantErrors: []string{"could not lock cache file: some lock error"},
|
||||
},
|
||||
{
|
||||
name: "invalid file",
|
||||
makeTestFile: func(t *testing.T, tmp string) {
|
||||
require.NoError(t, ioutil.WriteFile(tmp, []byte("invalid yaml"), 0600))
|
||||
},
|
||||
key: testKey{},
|
||||
wantErrors: []string{
|
||||
"failed to read cache, resetting: invalid cache file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type execcredcache.credCache",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid file, fail to unlock",
|
||||
makeTestFile: func(t *testing.T, tmp string) { require.NoError(t, ioutil.WriteFile(tmp, []byte("invalid"), 0600)) },
|
||||
trylockFunc: func(t *testing.T) error { return nil },
|
||||
unlockFunc: func(t *testing.T) error { return fmt.Errorf("some unlock error") },
|
||||
key: testKey{},
|
||||
wantErrors: []string{
|
||||
"failed to read cache, resetting: invalid cache file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type execcredcache.credCache",
|
||||
"could not unlock cache file: some unlock error",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unreadable file",
|
||||
makeTestFile: func(t *testing.T, tmp string) {
|
||||
require.NoError(t, os.Mkdir(tmp, 0700))
|
||||
},
|
||||
key: testKey{},
|
||||
wantErrors: []string{
|
||||
"failed to read cache, resetting: could not read cache file: read TEMPFILE: is a directory",
|
||||
"could not write cache: open TEMPFILE: is a directory",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid file but cache miss",
|
||||
makeTestFile: func(t *testing.T, tmp string) {
|
||||
validCache := emptyCache()
|
||||
validCache.Entries = []entry{{
|
||||
Key: jsonSHA256Hex(testKey{K1: "v3", K2: "v4"}),
|
||||
CreationTimestamp: metav1.NewTime(now.Add(-2 * time.Minute)),
|
||||
LastUsedTimestamp: metav1.NewTime(now.Add(-1 * time.Minute)),
|
||||
Credential: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
Token: "test-token",
|
||||
ExpirationTimestamp: &oneHourFromNow,
|
||||
},
|
||||
}}
|
||||
require.NoError(t, validCache.writeTo(tmp))
|
||||
},
|
||||
key: testKey{K1: "v1", K2: "v2"},
|
||||
wantErrors: []string{},
|
||||
},
|
||||
{
|
||||
name: "valid file but expired cache hit",
|
||||
makeTestFile: func(t *testing.T, tmp string) {
|
||||
validCache := emptyCache()
|
||||
oneMinuteAgo := metav1.NewTime(now.Add(-1 * time.Minute))
|
||||
validCache.Entries = []entry{{
|
||||
Key: jsonSHA256Hex(testKey{K1: "v1", K2: "v2"}),
|
||||
CreationTimestamp: metav1.NewTime(now.Add(-2 * time.Minute)),
|
||||
LastUsedTimestamp: metav1.NewTime(now.Add(-1 * time.Minute)),
|
||||
Credential: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
Token: "test-token",
|
||||
ExpirationTimestamp: &oneMinuteAgo,
|
||||
},
|
||||
}}
|
||||
require.NoError(t, validCache.writeTo(tmp))
|
||||
},
|
||||
key: testKey{K1: "v1", K2: "v2"},
|
||||
wantErrors: []string{},
|
||||
},
|
||||
{
|
||||
name: "valid file with cache hit",
|
||||
makeTestFile: func(t *testing.T, tmp string) {
|
||||
validCache := emptyCache()
|
||||
|
||||
validCache.Entries = []entry{{
|
||||
Key: jsonSHA256Hex(testKey{K1: "v1", K2: "v2"}),
|
||||
CreationTimestamp: metav1.NewTime(now.Add(-2 * time.Minute)),
|
||||
LastUsedTimestamp: metav1.NewTime(now.Add(-1 * time.Minute)),
|
||||
Credential: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
Token: "test-token",
|
||||
ExpirationTimestamp: &oneHourFromNow,
|
||||
},
|
||||
}}
|
||||
require.NoError(t, validCache.writeTo(tmp))
|
||||
},
|
||||
key: testKey{K1: "v1", K2: "v2"},
|
||||
wantErrors: []string{},
|
||||
want: &clientauthenticationv1beta1.ExecCredential{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ExecCredential",
|
||||
APIVersion: "client.authentication.k8s.io/v1beta1",
|
||||
},
|
||||
Spec: clientauthenticationv1beta1.ExecCredentialSpec{},
|
||||
Status: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
Token: "test-token",
|
||||
ExpirationTimestamp: &oneHourFromNow,
|
||||
},
|
||||
},
|
||||
wantTestFile: func(t *testing.T, tmp string) {
|
||||
cache, err := readCache(tmp)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, cache.Entries, 1)
|
||||
require.Less(t, time.Since(cache.Entries[0].LastUsedTimestamp.Time).Nanoseconds(), (5 * time.Second).Nanoseconds())
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
tmp := testutil.TempDir(t) + "/sessions.yaml"
|
||||
if tt.makeTestFile != nil {
|
||||
tt.makeTestFile(t, tmp)
|
||||
}
|
||||
|
||||
// Initialize a cache with a reporter that collects errors
|
||||
errors := errorCollector{t: t}
|
||||
c := New(tmp)
|
||||
c.errReporter = errors.report
|
||||
if tt.trylockFunc != nil {
|
||||
c.trylockFunc = func() error { return tt.trylockFunc(t) }
|
||||
}
|
||||
if tt.unlockFunc != nil {
|
||||
c.unlockFunc = func() error { return tt.unlockFunc(t) }
|
||||
}
|
||||
|
||||
got := c.Get(tt.key)
|
||||
require.Equal(t, tt.want, got)
|
||||
errors.require(tt.wantErrors, "TEMPFILE", tmp)
|
||||
if tt.wantTestFile != nil {
|
||||
tt.wantTestFile(t, tmp)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPutToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
now := time.Now().Round(1 * time.Second)
|
||||
|
||||
type testKey struct{ K1, K2 string }
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
makeTestFile func(t *testing.T, tmp string)
|
||||
key testKey
|
||||
cred *clientauthenticationv1beta1.ExecCredential
|
||||
wantErrors []string
|
||||
wantTestFile func(t *testing.T, tmp string)
|
||||
}{
|
||||
{
|
||||
name: "fail to create directory",
|
||||
makeTestFile: func(t *testing.T, tmp string) {
|
||||
require.NoError(t, ioutil.WriteFile(filepath.Dir(tmp), []byte{}, 0600))
|
||||
},
|
||||
wantErrors: []string{
|
||||
"could not create credential cache directory: mkdir TEMPDIR: not a directory",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "update to existing entry",
|
||||
makeTestFile: func(t *testing.T, tmp string) {
|
||||
validCache := emptyCache()
|
||||
validCache.Entries = []entry{
|
||||
{
|
||||
Key: jsonSHA256Hex(testKey{K1: "v1", K2: "v2"}),
|
||||
CreationTimestamp: metav1.NewTime(now.Add(-2 * time.Minute)),
|
||||
LastUsedTimestamp: metav1.NewTime(now.Add(-1 * time.Minute)),
|
||||
Credential: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: timePtr(now.Add(1 * time.Hour)),
|
||||
Token: "token-one",
|
||||
},
|
||||
},
|
||||
|
||||
// A second entry that was created over a day ago.
|
||||
{
|
||||
Key: jsonSHA256Hex(testKey{K1: "v3", K2: "v4"}),
|
||||
CreationTimestamp: metav1.NewTime(now.Add(-2 * time.Hour)),
|
||||
LastUsedTimestamp: metav1.NewTime(now.Add(-1 * time.Hour)),
|
||||
Credential: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: timePtr(now.Add(1 * time.Hour)),
|
||||
Token: "token-two",
|
||||
},
|
||||
},
|
||||
}
|
||||
require.NoError(t, os.MkdirAll(filepath.Dir(tmp), 0700))
|
||||
require.NoError(t, validCache.writeTo(tmp))
|
||||
},
|
||||
key: testKey{K1: "v1", K2: "v2"},
|
||||
cred: &clientauthenticationv1beta1.ExecCredential{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ExecCredential",
|
||||
APIVersion: "client.authentication.k8s.io/v1beta1",
|
||||
},
|
||||
Status: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: timePtr(now.Add(1 * time.Hour)),
|
||||
Token: "token-one",
|
||||
},
|
||||
},
|
||||
wantTestFile: func(t *testing.T, tmp string) {
|
||||
cache, err := readCache(tmp)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, cache.Entries, 1)
|
||||
require.Less(t, time.Since(cache.Entries[0].LastUsedTimestamp.Time).Nanoseconds(), (5 * time.Second).Nanoseconds())
|
||||
require.Equal(t, &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: timePtr(now.Add(1 * time.Hour).Local()),
|
||||
Token: "token-one",
|
||||
}, cache.Entries[0].Credential)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new entry",
|
||||
makeTestFile: func(t *testing.T, tmp string) {
|
||||
validCache := emptyCache()
|
||||
validCache.Entries = []entry{
|
||||
{
|
||||
Key: jsonSHA256Hex(testKey{K1: "v3", K2: "v4"}),
|
||||
CreationTimestamp: metav1.NewTime(now.Add(-2 * time.Minute)),
|
||||
LastUsedTimestamp: metav1.NewTime(now.Add(-1 * time.Minute)),
|
||||
Credential: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: timePtr(now.Add(1 * time.Hour)),
|
||||
Token: "other-token",
|
||||
},
|
||||
},
|
||||
}
|
||||
require.NoError(t, os.MkdirAll(filepath.Dir(tmp), 0700))
|
||||
require.NoError(t, validCache.writeTo(tmp))
|
||||
},
|
||||
key: testKey{K1: "v1", K2: "v2"},
|
||||
cred: &clientauthenticationv1beta1.ExecCredential{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ExecCredential",
|
||||
APIVersion: "client.authentication.k8s.io/v1beta1",
|
||||
},
|
||||
Status: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: timePtr(now.Add(1 * time.Hour)),
|
||||
Token: "token-one",
|
||||
},
|
||||
},
|
||||
wantTestFile: func(t *testing.T, tmp string) {
|
||||
cache, err := readCache(tmp)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, cache.Entries, 2)
|
||||
require.Less(t, time.Since(cache.Entries[1].LastUsedTimestamp.Time).Nanoseconds(), (5 * time.Second).Nanoseconds())
|
||||
require.Equal(t, &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: timePtr(now.Add(1 * time.Hour).Local()),
|
||||
Token: "token-one",
|
||||
}, cache.Entries[1].Credential)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "error writing cache",
|
||||
makeTestFile: func(t *testing.T, tmp string) {
|
||||
require.NoError(t, os.MkdirAll(tmp, 0700))
|
||||
},
|
||||
key: testKey{K1: "v1", K2: "v2"},
|
||||
cred: &clientauthenticationv1beta1.ExecCredential{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ExecCredential",
|
||||
APIVersion: "client.authentication.k8s.io/v1beta1",
|
||||
},
|
||||
Status: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: timePtr(now.Add(1 * time.Hour)),
|
||||
Token: "token-one",
|
||||
},
|
||||
},
|
||||
wantErrors: []string{
|
||||
"failed to read cache, resetting: could not read cache file: read TEMPFILE: is a directory",
|
||||
"could not write cache: open TEMPFILE: is a directory",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
tmp := testutil.TempDir(t) + "/cachedir/credentials.yaml"
|
||||
if tt.makeTestFile != nil {
|
||||
tt.makeTestFile(t, tmp)
|
||||
}
|
||||
// Initialize a cache with a reporter that collects errors
|
||||
errors := errorCollector{t: t}
|
||||
c := New(tmp)
|
||||
c.errReporter = errors.report
|
||||
c.Put(tt.key, tt.cred)
|
||||
errors.require(tt.wantErrors, "TEMPFILE", tmp, "TEMPDIR", filepath.Dir(tmp))
|
||||
if tt.wantTestFile != nil {
|
||||
tt.wantTestFile(t, tmp)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashing(t *testing.T) {
|
||||
type testKey struct{ K1, K2 string }
|
||||
require.Equal(t, "38e0b9de817f645c4bec37c0d4a3e58baecccb040f5718dc069a72c7385a0bed", jsonSHA256Hex(nil))
|
||||
require.Equal(t, "625bb1f93dc90a1bda400fdaceb8c96328e567a0c6aaf81e7fccc68958b4565d", jsonSHA256Hex([]string{"k1", "k2"}))
|
||||
require.Equal(t, "8fb659f5dd266ffd8d0c96116db1d96fe10e3879f9cb6f7e9ace016696ff69f6", jsonSHA256Hex(testKey{K1: "v1", K2: "v2"}))
|
||||
require.Equal(t, "42c783a2c29f91127b064df368bda61788181d2dd1709b417f9506102ea8da67", jsonSHA256Hex(testKey{K1: "v3", K2: "v4"}))
|
||||
require.Panics(t, func() { jsonSHA256Hex(&unmarshalable{}) })
|
||||
}
|
||||
|
||||
type errorCollector struct {
|
||||
t *testing.T
|
||||
saw []error
|
||||
}
|
||||
|
||||
func (e *errorCollector) report(err error) {
|
||||
e.saw = append(e.saw, err)
|
||||
}
|
||||
|
||||
func (e *errorCollector) require(want []string, subs ...string) {
|
||||
require.Len(e.t, e.saw, len(want))
|
||||
for i, w := range want {
|
||||
for i := 0; i < len(subs); i += 2 {
|
||||
w = strings.ReplaceAll(w, subs[i], subs[i+1])
|
||||
}
|
||||
require.EqualError(e.t, e.saw[i], w)
|
||||
}
|
||||
}
|
||||
|
||||
func timePtr(from time.Time) *metav1.Time {
|
||||
t := metav1.NewTime(from)
|
||||
return &t
|
||||
}
|
||||
|
||||
type unmarshalable struct{}
|
||||
|
||||
func (*unmarshalable) MarshalJSON() ([]byte, error) { return nil, fmt.Errorf("some MarshalJSON error") }
|
||||
1
internal/execcredcache/testdata/invalid.yaml
vendored
Normal file
@@ -0,0 +1 @@
invalid YAML
9
internal/execcredcache/testdata/valid.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
apiVersion: config.supervisor.pinniped.dev/v1alpha1
kind: CredentialCache
credentials:
- key: "test-key"
  creationTimestamp: "2020-10-20T18:42:07Z"
  lastUsedTimestamp: "2020-10-20T18:45:31Z"
  credential:
    expirationTimestamp: "2020-10-20T19:46:30Z"
    token: "test-token"
3
internal/execcredcache/testdata/wrong-version.yaml
vendored
Normal file
@@ -0,0 +1,3 @@
apiVersion: config.supervisor.pinniped.dev/v2alpha6
kind: NotACredentialCache
credentials: []
@@ -6,7 +6,6 @@ package kubeclient
import (
	"bytes"
	"encoding/hex"
	"fmt"
	"net/url"

	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -32,39 +31,17 @@ func defaultServerUrlFor(config *restclient.Config) (*url.URL, string, error) {
	return restclient.DefaultServerURL(host, config.APIPath, schema.GroupVersion{}, defaultTLS)
}

// truncateBody was copied from k8s.io/client-go/rest/request.go
// ...except I changed klog invocations to analogous plog invocations
//
// truncateBody decides if the body should be truncated, based on the glog Verbosity.
func truncateBody(body string) string {
	max := 0
	switch {
	case plog.Enabled(plog.LevelAll):
		return body
	case plog.Enabled(plog.LevelTrace):
		max = 10240
	case plog.Enabled(plog.LevelDebug):
		max = 1024
	}

	if len(body) <= max {
		return body
	}

	return body[:max] + fmt.Sprintf(" [truncated %d chars]", len(body)-max)
}

// glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against
// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine
// whether the body is printable.
func glogBody(prefix string, body []byte) {
	if plog.Enabled(plog.LevelDebug) {
	if plog.Enabled(plog.LevelAll) {
		if bytes.IndexFunc(body, func(r rune) bool {
			return r < 0x0a
		}) != -1 {
			plog.Debug(prefix, "body", truncateBody(hex.Dump(body)))
			plog.Debug(prefix, "body", hex.Dump(body))
		} else {
			plog.Debug(prefix, "body", truncateBody(string(body)))
			plog.Debug(prefix, "body", string(body))
		}
	}
}

@@ -1,4 +1,4 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package oidc contains common OIDC functionality needed by Pinniped.
@@ -164,13 +164,8 @@ type TimeoutsConfiguration struct {
	OIDCSessionStorageLifetime time.Duration

	// AccessTokenSessionStorageLifetime is the length of time after which an access token's session data is allowed
	// to be garbage collected from storage. These must exist in storage for as long as the refresh token is valid.
	// Therefore, this can be just slightly longer than the AccessTokenLifespan. Access tokens are handed back to
	// the token endpoint for the token exchange use case. During a token exchange, if the access token is expired
	// and still exists in storage, then the endpoint will be able to give a slightly more specific error message,
	// rather than a more generic error that is returned when the token does not exist. If this is desirable, then
	// the AccessTokenSessionStorageLifetime can be made to be significantly larger than AccessTokenLifespan, at the
	// cost of slower cleanup.
	// to be garbage collected from storage. These must exist in storage for as long as the refresh token is valid
	// or else the refresh flow will not work properly. So this must be longer than RefreshTokenLifespan.
	AccessTokenSessionStorageLifetime time.Duration

	// RefreshTokenSessionStorageLifetime is the length of time after which a refresh token's session data is allowed
@@ -186,7 +181,7 @@ type TimeoutsConfiguration struct {

// Get the defaults for the Supervisor server.
func DefaultOIDCTimeoutsConfiguration() TimeoutsConfiguration {
	accessTokenLifespan := 15 * time.Minute
	accessTokenLifespan := 2 * time.Minute
	authorizationCodeLifespan := 10 * time.Minute
	refreshTokenLifespan := 9 * time.Hour

@@ -199,7 +194,7 @@ func DefaultOIDCTimeoutsConfiguration() TimeoutsConfiguration {
		AuthorizationCodeSessionStorageLifetime: authorizationCodeLifespan + refreshTokenLifespan,
		PKCESessionStorageLifetime:              authorizationCodeLifespan + (1 * time.Minute),
		OIDCSessionStorageLifetime:              authorizationCodeLifespan + (1 * time.Minute),
		AccessTokenSessionStorageLifetime:       accessTokenLifespan + (1 * time.Minute),
		AccessTokenSessionStorageLifetime:       refreshTokenLifespan + accessTokenLifespan,
		RefreshTokenSessionStorageLifetime:      refreshTokenLifespan + accessTokenLifespan,
	}
}
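The comment change above says access token session storage must now outlive the refresh token. A standalone sketch (not part of the diff) of the new default arithmetic, with durations copied from DefaultOIDCTimeoutsConfiguration, shows the invariant holds:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Defaults from DefaultOIDCTimeoutsConfiguration after this change.
	accessTokenLifespan := 2 * time.Minute
	refreshTokenLifespan := 9 * time.Hour

	// Session storage for access tokens is now refresh + access lifespans,
	// so it is always longer than the refresh token lifespan.
	accessTokenSessionStorageLifetime := refreshTokenLifespan + accessTokenLifespan

	fmt.Println(accessTokenSessionStorageLifetime)                        // 9h2m0s
	fmt.Println(accessTokenSessionStorageLifetime > refreshTokenLifespan) // true
}
```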
@@ -1,4 +1,4 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package token
@@ -60,8 +60,8 @@ const (
	hmacSecret = "this needs to be at least 32 characters to meet entropy requirements"

	authCodeExpirationSeconds    = 10 * 60 // Current, we set our auth code expiration to 10 minutes
	accessTokenExpirationSeconds = 15 * 60 // Currently, we set our access token expiration to 15 minutes
	idTokenExpirationSeconds     = 15 * 60 // Currently, we set our ID token expiration to 15 minutes
	accessTokenExpirationSeconds = 2 * 60  // Currently, we set our access token expiration to 2 minutes
	idTokenExpirationSeconds     = 2 * 60  // Currently, we set our ID token expiration to 2 minutes

	timeComparisonFudgeSeconds = 15
)
@@ -1,4 +1,4 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package plog implements a thin layer over klog to help enforce pinniped's logging convention.
@@ -26,56 +26,157 @@
// act of desperation to determine why the system is broken.
package plog

import "k8s.io/klog/v2"
import (
	"k8s.io/klog/v2"
)

const errorKey = "error"

// Use Error to log an unexpected system error.
func Error(msg string, err error, keysAndValues ...interface{}) {
	klog.ErrorS(err, msg, keysAndValues...)
type _ interface {
	Error(msg string, err error, keysAndValues ...interface{})
	Warning(msg string, keysAndValues ...interface{})
	WarningErr(msg string, err error, keysAndValues ...interface{})
	Info(msg string, keysAndValues ...interface{})
	InfoErr(msg string, err error, keysAndValues ...interface{})
	Debug(msg string, keysAndValues ...interface{})
	DebugErr(msg string, err error, keysAndValues ...interface{})
	Trace(msg string, keysAndValues ...interface{})
	TraceErr(msg string, err error, keysAndValues ...interface{})
	All(msg string, keysAndValues ...interface{})
}

func Warning(msg string, keysAndValues ...interface{}) {
type PLogger struct {
	prefix string
	depth  int
}

func New(prefix string) PLogger {
	return PLogger{
		depth:  0,
		prefix: prefix,
	}
}

func (p *PLogger) Error(msg string, err error, keysAndValues ...interface{}) {
	klog.ErrorSDepth(p.depth+1, err, p.prefix+msg, keysAndValues...)
}

func (p *PLogger) warningDepth(msg string, depth int, keysAndValues ...interface{}) {
	// klog's structured logging has no concept of a warning (i.e. no WarningS function)
	// Thus we use info at log level zero as a proxy
	// klog's info logs have an I prefix and its warning logs have a W prefix
	// Since we lose the W prefix by using InfoS, just add a key to make these easier to find
	keysAndValues = append([]interface{}{"warning", "true"}, keysAndValues...)
	klog.V(klogLevelWarning).InfoS(msg, keysAndValues...)
	if klog.V(klogLevelWarning).Enabled() {
		klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
	}
}

func (p *PLogger) Warning(msg string, keysAndValues ...interface{}) {
	p.warningDepth(msg, p.depth+1, keysAndValues...)
}

// Use WarningErr to issue a Warning message with an error object as part of the message.
func (p *PLogger) WarningErr(msg string, err error, keysAndValues ...interface{}) {
	p.warningDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
}

func (p *PLogger) infoDepth(msg string, depth int, keysAndValues ...interface{}) {
	if klog.V(klogLevelInfo).Enabled() {
		klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
	}
}

func (p *PLogger) Info(msg string, keysAndValues ...interface{}) {
	p.infoDepth(msg, p.depth+1, keysAndValues...)
}

// Use InfoErr to log an expected error, e.g. validation failure of an http parameter.
func (p *PLogger) InfoErr(msg string, err error, keysAndValues ...interface{}) {
	p.infoDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
}

func (p *PLogger) debugDepth(msg string, depth int, keysAndValues ...interface{}) {
	if klog.V(klogLevelDebug).Enabled() {
		klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
	}
}

func (p *PLogger) Debug(msg string, keysAndValues ...interface{}) {
	p.debugDepth(msg, p.depth+1, keysAndValues...)
}

// Use DebugErr to issue a Debug message with an error object as part of the message.
func (p *PLogger) DebugErr(msg string, err error, keysAndValues ...interface{}) {
	p.debugDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
}

func (p *PLogger) traceDepth(msg string, depth int, keysAndValues ...interface{}) {
	if klog.V(klogLevelTrace).Enabled() {
		klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
	}
}

func (p *PLogger) Trace(msg string, keysAndValues ...interface{}) {
	p.traceDepth(msg, p.depth+1, keysAndValues...)
}

// Use TraceErr to issue a Trace message with an error object as part of the message.
func (p *PLogger) TraceErr(msg string, err error, keysAndValues ...interface{}) {
	p.traceDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
}

func (p *PLogger) All(msg string, keysAndValues ...interface{}) {
	if klog.V(klogLevelAll).Enabled() {
		klog.InfoSDepth(p.depth+1, p.prefix+msg, keysAndValues...)
	}
}

var pLogger = PLogger{ //nolint:gochecknoglobals
	depth: 1,
}

// Use Error to log an unexpected system error.
func Error(msg string, err error, keysAndValues ...interface{}) {
	pLogger.Error(msg, err, keysAndValues...)
}

func Warning(msg string, keysAndValues ...interface{}) {
	pLogger.Warning(msg, keysAndValues...)
}

// Use WarningErr to issue a Warning message with an error object as part of the message.
func WarningErr(msg string, err error, keysAndValues ...interface{}) {
	Warning(msg, append([]interface{}{errorKey, err}, keysAndValues...)...)
	pLogger.WarningErr(msg, err, keysAndValues...)
}

func Info(msg string, keysAndValues ...interface{}) {
	klog.V(klogLevelInfo).InfoS(msg, keysAndValues...)
	pLogger.Info(msg, keysAndValues...)
}

// Use InfoErr to log an expected error, e.g. validation failure of an http parameter.
func InfoErr(msg string, err error, keysAndValues ...interface{}) {
	Info(msg, append([]interface{}{errorKey, err}, keysAndValues...)...)
	pLogger.InfoErr(msg, err, keysAndValues...)
}

func Debug(msg string, keysAndValues ...interface{}) {
	klog.V(klogLevelDebug).InfoS(msg, keysAndValues...)
	pLogger.Debug(msg, keysAndValues...)
}

// Use DebugErr to issue a Debug message with an error object as part of the message.
func DebugErr(msg string, err error, keysAndValues ...interface{}) {
	Debug(msg, append([]interface{}{errorKey, err}, keysAndValues...)...)
	pLogger.DebugErr(msg, err, keysAndValues...)
}

func Trace(msg string, keysAndValues ...interface{}) {
	klog.V(klogLevelTrace).InfoS(msg, keysAndValues...)
	pLogger.Trace(msg, keysAndValues...)
}

// Use TraceErr to issue a Trace message with an error object as part of the message.
func TraceErr(msg string, err error, keysAndValues ...interface{}) {
	Trace(msg, append([]interface{}{errorKey, err}, keysAndValues...)...)
	pLogger.TraceErr(msg, err, keysAndValues...)
}

func All(msg string, keysAndValues ...interface{}) {
	klog.V(klogLevelAll).InfoS(msg, keysAndValues...)
	pLogger.All(msg, keysAndValues...)
}
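For reference, a minimal usage sketch of the reworked package (assuming the caller lives inside this module, since internal/plog cannot be imported from elsewhere; the controller name and messages below are made up):

```go
package examplecontroller

import (
	"fmt"

	"go.pinniped.dev/internal/plog"
)

func example() {
	// The package-level helpers keep their old signatures.
	plog.Info("syncing resource", "kind", "FederationDomain")

	// The new PLogger prepends a fixed prefix to every message and logs with
	// the caller's depth, so klog attributes the line to this call site.
	logger := plog.New("example-controller: ") // hypothetical prefix
	logger.Debug("starting reconcile", "key", "pinniped-supervisor/default")
	logger.WarningErr("reconcile failed, will retry", fmt.Errorf("boom"), "attempt", 1)
}
```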
internal/valuelesscontext/valuelesscontext.go (new file, 14 lines)
@@ -0,0 +1,14 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package valuelesscontext

import "context"

func New(ctx context.Context) context.Context {
	return valuelessContext{Context: ctx}
}

type valuelessContext struct{ context.Context }

func (valuelessContext) Value(interface{}) interface{} { return nil }
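A small sketch (again assuming in-module code, since the package is internal) of what the wrapper does: values stored on the parent context become invisible, while deadlines and cancellation still propagate.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"go.pinniped.dev/internal/valuelesscontext"
)

type key struct{}

func main() {
	parent, cancel := context.WithTimeout(context.Background(), time.Minute)
	parent = context.WithValue(parent, key{}, "request-scoped secret")

	stripped := valuelesscontext.New(parent)

	fmt.Println(stripped.Value(key{})) // <nil>: values from the parent are hidden
	_, hasDeadline := stripped.Deadline()
	fmt.Println(hasDeadline) // true: the timeout still applies

	cancel()
	<-stripped.Done()           // cancellation still propagates
	fmt.Println(stripped.Err()) // context.Canceled
}
```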
242
internal/valuelesscontext/valuelesscontext_test.go
Normal file
242
internal/valuelesscontext/valuelesscontext_test.go
Normal file
@@ -0,0 +1,242 @@
|
||||
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package valuelesscontext
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/apiserver/pkg/authentication/authenticator"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type contextKey int
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
f func(*testing.T, context.Context) context.Context
|
||||
wantReg, wantNew, wantBoth func(*testing.T, context.Context)
|
||||
}{
|
||||
{
|
||||
name: "empty context",
|
||||
f: func(t *testing.T, ctx context.Context) context.Context {
|
||||
return ctx
|
||||
},
|
||||
wantReg: func(t *testing.T, ctx context.Context) {},
|
||||
wantNew: func(t *testing.T, ctx context.Context) {},
|
||||
wantBoth: func(t *testing.T, ctx context.Context) {
|
||||
auds, ok := authenticator.AudiencesFrom(ctx)
|
||||
require.False(t, ok)
|
||||
require.Nil(t, auds)
|
||||
|
||||
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string)
|
||||
require.False(t, ok)
|
||||
require.Zero(t, val)
|
||||
|
||||
deadline, ok := ctx.Deadline()
|
||||
require.False(t, ok)
|
||||
require.Zero(t, deadline)
|
||||
|
||||
require.Nil(t, ctx.Done())
|
||||
|
||||
require.NoError(t, ctx.Err())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "context with audience",
|
||||
f: func(t *testing.T, ctx context.Context) context.Context {
|
||||
return authenticator.WithAudiences(ctx, authenticator.Audiences{"1", "2"})
|
||||
},
|
||||
wantReg: func(t *testing.T, ctx context.Context) {
|
||||
auds, ok := authenticator.AudiencesFrom(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, authenticator.Audiences{"1", "2"}, auds)
|
||||
},
|
||||
wantNew: func(t *testing.T, ctx context.Context) {
|
||||
auds, ok := authenticator.AudiencesFrom(ctx)
|
||||
require.False(t, ok)
|
||||
require.Nil(t, auds)
|
||||
},
|
||||
wantBoth: func(t *testing.T, ctx context.Context) {
|
||||
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string)
|
||||
require.False(t, ok)
|
||||
require.Zero(t, val)
|
||||
|
||||
deadline, ok := ctx.Deadline()
|
||||
require.False(t, ok)
|
||||
require.Zero(t, deadline)
|
||||
|
||||
require.Nil(t, ctx.Done())
|
||||
|
||||
require.NoError(t, ctx.Err())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "context with audience and past deadline",
|
||||
f: func(t *testing.T, ctx context.Context) context.Context {
|
||||
ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"})
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithDeadline(ctx, time.Now().Add(-time.Hour))
|
||||
t.Cleanup(cancel)
|
||||
return ctx
|
||||
},
|
||||
wantReg: func(t *testing.T, ctx context.Context) {
|
||||
auds, ok := authenticator.AudiencesFrom(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, authenticator.Audiences{"3", "4"}, auds)
|
||||
},
|
||||
wantNew: func(t *testing.T, ctx context.Context) {
|
||||
auds, ok := authenticator.AudiencesFrom(ctx)
|
||||
require.False(t, ok)
|
||||
require.Nil(t, auds)
|
||||
},
|
||||
wantBoth: func(t *testing.T, ctx context.Context) {
|
||||
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string)
|
||||
require.False(t, ok)
|
||||
require.Zero(t, val)
|
||||
|
||||
deadline, ok := ctx.Deadline()
|
||||
require.True(t, ok)
|
||||
require.NotZero(t, deadline)
|
||||
require.True(t, deadline.Before(time.Now()))
|
||||
|
||||
ch := ctx.Done()
|
||||
require.NotNil(t, ch)
|
||||
select {
|
||||
case <-ch:
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Error("expected closed done channel")
|
||||
}
|
||||
|
||||
require.Equal(t, context.DeadlineExceeded, ctx.Err())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "context with audience and custom value and past deadline",
|
||||
f: func(t *testing.T, ctx context.Context) context.Context {
|
||||
ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"})
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithDeadline(ctx, time.Now().Add(-time.Hour))
|
||||
t.Cleanup(cancel)
|
||||
ctx = context.WithValue(ctx, contextKey(0xDEADBEEF), "mooo")
|
||||
return ctx
|
||||
},
|
||||
wantReg: func(t *testing.T, ctx context.Context) {
|
||||
auds, ok := authenticator.AudiencesFrom(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, authenticator.Audiences{"3", "4"}, auds)
|
||||
|
||||
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "mooo", val)
|
||||
},
|
||||
wantNew: func(t *testing.T, ctx context.Context) {
|
||||
auds, ok := authenticator.AudiencesFrom(ctx)
|
||||
require.False(t, ok)
|
||||
require.Nil(t, auds)
|
||||
|
||||
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string)
|
||||
require.False(t, ok)
|
||||
require.Zero(t, val)
|
||||
},
|
||||
wantBoth: func(t *testing.T, ctx context.Context) {
|
||||
deadline, ok := ctx.Deadline()
|
||||
require.True(t, ok)
|
||||
require.NotZero(t, deadline)
|
||||
require.True(t, deadline.Before(time.Now()))
|
||||
|
||||
ch := ctx.Done()
|
||||
require.NotNil(t, ch)
|
||||
select {
|
||||
case <-ch:
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Error("expected closed done channel")
|
||||
}
|
||||
|
||||
require.Equal(t, context.DeadlineExceeded, ctx.Err())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "context with audience and custom value and future deadline",
|
||||
f: func(t *testing.T, ctx context.Context) context.Context {
|
||||
ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"})
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithDeadline(ctx, time.Now().Add(time.Hour))
|
||||
t.Cleanup(cancel)
|
||||
ctx = context.WithValue(ctx, contextKey(0xDEADBEEF), "mooo")
|
||||
return ctx
|
||||
},
|
||||
wantReg: func(t *testing.T, ctx context.Context) {
|
||||
auds, ok := authenticator.AudiencesFrom(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, authenticator.Audiences{"3", "4"}, auds)
|
||||
|
||||
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "mooo", val)
|
||||
},
|
||||
wantNew: func(t *testing.T, ctx context.Context) {
|
||||
auds, ok := authenticator.AudiencesFrom(ctx)
|
||||
require.False(t, ok)
|
||||
require.Nil(t, auds)
|
||||
|
||||
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string)
|
||||
require.False(t, ok)
|
||||
require.Zero(t, val)
|
||||
},
|
||||
wantBoth: func(t *testing.T, ctx context.Context) {
|
||||
deadline, ok := ctx.Deadline()
|
||||
require.True(t, ok)
|
||||
require.NotZero(t, deadline)
|
||||
require.True(t, deadline.After(time.Now()))
|
||||
|
||||
ch := ctx.Done()
|
||||
require.NotNil(t, ch)
|
||||
select {
|
||||
case <-ch:
|
||||
t.Error("expected not closed done channel")
|
||||
case <-time.After(3 * time.Second):
|
||||
}
|
||||
|
||||
require.NoError(t, ctx.Err())
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := tt.f(t, context.Background())
|
||||
|
||||
t.Run("reg", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tt.wantReg(t, ctx)
|
||||
})
|
||||
|
||||
t.Run("reg-both", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tt.wantBoth(t, ctx)
|
||||
})
|
||||
|
||||
t.Run("new", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tt.wantNew(t, New(ctx))
|
||||
})
|
||||
|
||||
t.Run("new-both", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tt.wantBoth(t, New(ctx))
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package filesession implements a simple YAML file-based login.sessionCache.
@@ -137,9 +137,15 @@ func (c *Cache) withCache(transact func(*sessionCache)) {
		cache = emptySessionCache()
	}

	// Normalize the cache before modifying it, to remove any entries that have already expired.
	cache = cache.normalized()

	// Process/mutate the session using the provided function.
	transact(cache)

	// Normalize again to put everything into a known order.
	cache = cache.normalized()

	// Marshal the session back to YAML and save it to the file.
	if err := cache.writeTo(c.path); err != nil {
		c.errReporter(fmt.Errorf("could not write session cache: %w", err))
@@ -1,4 +1,4 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package filesession
|
||||
@@ -125,6 +125,41 @@ func TestGetToken(t *testing.T) {
|
||||
},
|
||||
wantErrors: []string{},
|
||||
},
|
||||
{
|
||||
name: "valid file but expired cache hit",
|
||||
makeTestFile: func(t *testing.T, tmp string) {
|
||||
validCache := emptySessionCache()
|
||||
validCache.insert(sessionEntry{
|
||||
Key: oidcclient.SessionCacheKey{
|
||||
Issuer: "test-issuer",
|
||||
ClientID: "test-client-id",
|
||||
Scopes: []string{"email", "offline_access", "openid", "profile"},
|
||||
RedirectURI: "http://localhost:0/callback",
|
||||
},
|
||||
CreationTimestamp: metav1.NewTime(now.Add(-2 * time.Hour)),
|
||||
LastUsedTimestamp: metav1.NewTime(now.Add(-1 * time.Hour)),
|
||||
Tokens: oidctypes.Token{
|
||||
AccessToken: &oidctypes.AccessToken{
|
||||
Token: "test-access-token",
|
||||
Type: "Bearer",
|
||||
Expiry: metav1.NewTime(now.Add(-1 * time.Hour)),
|
||||
},
|
||||
IDToken: &oidctypes.IDToken{
|
||||
Token: "test-id-token",
|
||||
Expiry: metav1.NewTime(now.Add(-1 * time.Hour)),
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, validCache.writeTo(tmp))
|
||||
},
|
||||
key: oidcclient.SessionCacheKey{
|
||||
Issuer: "test-issuer",
|
||||
ClientID: "test-client-id",
|
||||
Scopes: []string{"email", "offline_access", "openid", "profile"},
|
||||
RedirectURI: "http://localhost:0/callback",
|
||||
},
|
||||
wantErrors: []string{},
|
||||
},
|
||||
{
|
||||
name: "valid file with cache hit",
|
||||
makeTestFile: func(t *testing.T, tmp string) {
|
||||
@@ -261,6 +296,33 @@ func TestPutToken(t *testing.T) {
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Insert another entry that hasn't been used for over 90 days.
|
||||
validCache.insert(sessionEntry{
|
||||
Key: oidcclient.SessionCacheKey{
|
||||
Issuer: "test-issuer-2",
|
||||
ClientID: "test-client-id-2",
|
||||
Scopes: []string{"email", "offline_access", "openid", "profile"},
|
||||
RedirectURI: "http://localhost:0/callback",
|
||||
},
|
||||
CreationTimestamp: metav1.NewTime(now.Add(-95 * 24 * time.Hour)),
|
||||
LastUsedTimestamp: metav1.NewTime(now.Add(-91 * 24 * time.Hour)),
|
||||
Tokens: oidctypes.Token{
|
||||
AccessToken: &oidctypes.AccessToken{
|
||||
Token: "old-access-token2",
|
||||
Type: "Bearer",
|
||||
Expiry: metav1.NewTime(now.Add(-1 * time.Hour)),
|
||||
},
|
||||
IDToken: &oidctypes.IDToken{
|
||||
Token: "old-id-token2",
|
||||
Expiry: metav1.NewTime(now.Add(-1 * time.Hour)),
|
||||
},
|
||||
RefreshToken: &oidctypes.RefreshToken{
|
||||
Token: "old-refresh-token2",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
require.NoError(t, os.MkdirAll(filepath.Dir(tmp), 0700))
|
||||
require.NoError(t, validCache.writeTo(tmp))
|
||||
},
|
||||
|
||||
@@ -1,4 +1,4 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package oidcclient implements a CLI OIDC login flow.
@@ -17,6 +17,7 @@ import (
	"time"

	"github.com/coreos/go-oidc/v3/oidc"
	"github.com/go-logr/logr"
	"github.com/pkg/browser"
	"golang.org/x/oauth2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -44,11 +45,14 @@ const (
	// overallTimeout is the overall time that a login is allowed to take. This includes several user interactions, so
	// we set this to be relatively long.
	overallTimeout = 90 * time.Minute

	debugLogLevel = 4
)

type handlerState struct {
	// Basic parameters.
	ctx      context.Context
	logger   logr.Logger
	issuer   string
	clientID string
	scopes   []string
@@ -97,6 +101,15 @@ func WithContext(ctx context.Context) Option {
	}
}

// WithLogger specifies a PLogger to use with the login.
// If not specified this will default to a new logger.
func WithLogger(logger logr.Logger) Option {
	return func(h *handlerState) error {
		h.logger = logger
		return nil
	}
}

// WithListenPort specifies a TCP listen port on localhost, which will be used for the redirect_uri and to handle the
// authorization code callback. By default, a random high port will be chosen which requires the authorization server
// to support wildcard port numbers as described by https://tools.ietf.org/html/rfc8252:
@@ -183,6 +196,7 @@ func Login(issuer string, clientID string, opts ...Option) (*oidctypes.Token, er
		cache:        &nopCache{},
		callbackPath: "/callback",
		ctx:          context.Background(),
		logger:       logr.Discard(), // discard logs unless a logger is specified
		callbacks:    make(chan callbackResult),
		httpClient:   http.DefaultClient,

@@ -260,6 +274,7 @@ func (h *handlerState) baseLogin() (*oidctypes.Token, error) {
	// If the ID token is still valid for a bit, return it immediately and skip the rest of the flow.
	cached := h.cache.GetToken(cacheKey)
	if cached != nil && cached.IDToken != nil && time.Until(cached.IDToken.Expiry.Time) > minIDTokenValidity {
		h.logger.V(debugLogLevel).Info("Pinniped: Found unexpired cached token.")
		return cached, nil
	}

@@ -327,6 +342,7 @@ func (h *handlerState) initOIDCDiscovery() error {
		return nil
	}

	h.logger.V(debugLogLevel).Info("Pinniped: Performing OIDC discovery", "issuer", h.issuer)
	var err error
	h.provider, err = oidc.NewProvider(h.ctx, h.issuer)
	if err != nil {
@@ -343,6 +359,7 @@ func (h *handlerState) initOIDCDiscovery() error {
}

func (h *handlerState) tokenExchangeRFC8693(baseToken *oidctypes.Token) (*oidctypes.Token, error) {
	h.logger.V(debugLogLevel).Info("Pinniped: Performing RFC8693 token exchange", "requestedAudience", h.requestedAudience)
	// Perform OIDC discovery. This may have already been performed if there was not a cached base token.
	if err := h.initOIDCDiscovery(); err != nil {
		return nil, err
@@ -413,6 +430,7 @@ func (h *handlerState) tokenExchangeRFC8693(baseToken *oidctypes.Token) (*oidcty
}

func (h *handlerState) handleRefresh(ctx context.Context, refreshToken *oidctypes.RefreshToken) (*oidctypes.Token, error) {
	h.logger.V(debugLogLevel).Info("Pinniped: Refreshing cached token.")
	refreshSource := h.oauth2Config.TokenSource(ctx, &oauth2.Token{RefreshToken: refreshToken.Token})

	refreshed, err := refreshSource.Token()
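Since pkg/oidcclient is a public package, the new WithLogger option can be exercised from outside the repo. A hedged usage sketch (the issuer and client ID are placeholders; stdr is used only because the tests below wire it up the same way):

```go
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"

	"go.pinniped.dev/pkg/oidcclient"
)

func main() {
	// Verbosity 4 matches debugLogLevel above, so the "Pinniped: ..." debug
	// lines added in this change are actually emitted.
	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags))
	stdr.SetVerbosity(4)

	token, err := oidcclient.Login(
		"https://supervisor.example.com/issuer", // placeholder issuer
		"pinniped-cli",                          // placeholder client ID
		oidcclient.WithLogger(logger),
		oidcclient.WithListenPort(0),
	)
	if err != nil {
		log.Fatal(err)
	}
	if token.IDToken != nil {
		log.Println("ID token expires at", token.IDToken.Expiry.Time)
	}
}
```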
@@ -13,17 +13,21 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/stdr"
|
||||
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/oauth2"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"go.pinniped.dev/internal/httputil/httperr"
|
||||
"go.pinniped.dev/internal/mocks/mockupstreamoidcidentityprovider"
|
||||
"go.pinniped.dev/internal/oidc/provider"
|
||||
"go.pinniped.dev/internal/testutil"
|
||||
"go.pinniped.dev/internal/testutil/testlogger"
|
||||
"go.pinniped.dev/pkg/oidcclient/nonce"
|
||||
"go.pinniped.dev/pkg/oidcclient/oidctypes"
|
||||
"go.pinniped.dev/pkg/oidcclient/pkce"
|
||||
@@ -205,6 +209,7 @@ func TestLogin(t *testing.T) {
|
||||
clientID string
|
||||
wantErr string
|
||||
wantToken *oidctypes.Token
|
||||
wantLogs []string
|
||||
}{
|
||||
{
|
||||
name: "option error",
|
||||
@@ -269,7 +274,8 @@ func TestLogin(t *testing.T) {
|
||||
return WithSessionCache(cache)(h)
|
||||
}
|
||||
},
|
||||
wantErr: `could not perform OIDC discovery for "test-issuer": Get "test-issuer/.well-known/openid-configuration": unsupported protocol scheme ""`,
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"test-issuer\""},
|
||||
wantErr: `could not perform OIDC discovery for "test-issuer": Get "test-issuer/.well-known/openid-configuration": unsupported protocol scheme ""`,
|
||||
},
|
||||
{
|
||||
name: "session cache hit with valid token",
|
||||
@@ -290,6 +296,7 @@ func TestLogin(t *testing.T) {
|
||||
return WithSessionCache(cache)(h)
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\""},
|
||||
wantToken: &testToken,
|
||||
},
|
||||
{
|
||||
@@ -297,8 +304,9 @@ func TestLogin(t *testing.T) {
|
||||
opt: func(t *testing.T) Option {
|
||||
return func(h *handlerState) error { return nil }
|
||||
},
|
||||
issuer: errorServer.URL,
|
||||
wantErr: fmt.Sprintf("could not perform OIDC discovery for %q: 500 Internal Server Error: some discovery error\n", errorServer.URL),
|
||||
issuer: errorServer.URL,
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + errorServer.URL + "\""},
|
||||
wantErr: fmt.Sprintf("could not perform OIDC discovery for %q: 500 Internal Server Error: some discovery error\n", errorServer.URL),
|
||||
},
|
||||
{
|
||||
name: "session cache hit with refreshable token",
|
||||
@@ -337,6 +345,8 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Refreshing cached token.\""},
|
||||
wantToken: &testToken,
|
||||
},
|
||||
{
|
||||
@@ -369,6 +379,8 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Refreshing cached token.\""},
|
||||
wantErr: "some validation error",
|
||||
},
|
||||
{
|
||||
@@ -395,6 +407,8 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Refreshing cached token.\""},
|
||||
// Expect this to fall through to the authorization code flow, so it fails here.
|
||||
wantErr: "could not open callback listener: listen tcp: address invalid-listen-address: missing port in address",
|
||||
},
|
||||
@@ -406,8 +420,9 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
issuer: successServer.URL,
|
||||
wantErr: "could not open callback listener: listen tcp: address invalid-listen-address: missing port in address",
|
||||
issuer: successServer.URL,
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
|
||||
wantErr: "could not open callback listener: listen tcp: address invalid-listen-address: missing port in address",
|
||||
},
|
||||
{
|
||||
name: "browser open failure",
|
||||
@@ -416,8 +431,9 @@ func TestLogin(t *testing.T) {
|
||||
return fmt.Errorf("some browser open error")
|
||||
})
|
||||
},
|
||||
issuer: successServer.URL,
|
||||
wantErr: "could not open browser: some browser open error",
|
||||
issuer: successServer.URL,
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
|
||||
wantErr: "could not open browser: some browser open error",
|
||||
},
|
||||
{
|
||||
name: "timeout waiting for callback",
|
||||
@@ -433,8 +449,9 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
issuer: successServer.URL,
|
||||
wantErr: "timed out waiting for token callback: context canceled",
|
||||
issuer: successServer.URL,
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
|
||||
wantErr: "timed out waiting for token callback: context canceled",
|
||||
},
|
||||
{
|
||||
name: "callback returns error",
|
||||
@@ -449,8 +466,9 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
issuer: successServer.URL,
|
||||
wantErr: "error handling callback: some callback error",
|
||||
issuer: successServer.URL,
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
|
||||
wantErr: "error handling callback: some callback error",
|
||||
},
|
||||
{
|
||||
name: "callback returns success",
|
||||
@@ -510,6 +528,7 @@ func TestLogin(t *testing.T) {
|
||||
}
|
||||
},
|
||||
issuer: successServer.URL,
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
|
||||
wantToken: &testToken,
|
||||
},
|
||||
{
|
||||
@@ -533,6 +552,9 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"cluster-1234\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + errorServer.URL + "\""},
|
||||
wantErr: fmt.Sprintf("failed to exchange token: could not perform OIDC discovery for %q: 500 Internal Server Error: some discovery error\n", errorServer.URL),
|
||||
},
|
||||
{
|
||||
@@ -556,6 +578,9 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"cluster-1234\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + brokenTokenURLServer.URL + "\""},
|
||||
wantErr: `failed to exchange token: could not build RFC8693 request: parse "%": invalid URL escape "%"`,
|
||||
},
|
||||
{
|
||||
@@ -579,6 +604,9 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-http-response\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
|
||||
wantErr: fmt.Sprintf(`failed to exchange token: Post "%s/token": failed to parse Location header "%%": parse "%%": invalid URL escape "%%"`, successServer.URL),
|
||||
},
|
||||
{
|
||||
@@ -602,6 +630,9 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-http-400\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
|
||||
wantErr: `failed to exchange token: unexpected HTTP response status 400`,
|
||||
},
|
||||
{
|
||||
@@ -625,6 +656,9 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-content-type\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
|
||||
wantErr: `failed to exchange token: failed to decode content-type header: mime: invalid media parameter`,
|
||||
},
|
||||
{
|
||||
@@ -648,6 +682,9 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-wrong-content-type\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
|
||||
wantErr: `failed to exchange token: unexpected HTTP response content type "invalid"`,
|
||||
},
|
||||
{
|
||||
@@ -671,6 +708,9 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-json\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
|
||||
wantErr: `failed to exchange token: failed to decode response: unexpected EOF`,
|
||||
},
|
||||
{
|
||||
@@ -694,6 +734,9 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-tokentype\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
|
||||
wantErr: `failed to exchange token: got unexpected token_type "invalid"`,
|
||||
},
|
||||
{
|
||||
@@ -717,6 +760,9 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-issuedtokentype\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
|
||||
wantErr: `failed to exchange token: got unexpected issued_token_type "invalid"`,
|
||||
},
|
||||
{
|
||||
@@ -740,6 +786,9 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-jwt\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
|
||||
wantErr: `failed to exchange token: received invalid JWT: oidc: malformed jwt: square/go-jose: compact JWS format must have three parts`,
|
||||
},
|
||||
{
|
||||
@@ -769,6 +818,9 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
|
||||
wantToken: &testExchangedToken,
|
||||
},
|
||||
{
|
||||
@@ -816,18 +868,29 @@ func TestLogin(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
},
|
||||
wantLogs: []string{
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Refreshing cached token.\"",
|
||||
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience\"",
|
||||
},
|
||||
wantToken: &testExchangedToken,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
testLogger := testlogger.New(t)
|
||||
klog.SetLogger(testLogger)
|
||||
stdr.SetVerbosity(debugLogLevel) // set stdr's global log level to debug so the test logger will send output.
|
||||
|
||||
tok, err := Login(tt.issuer, tt.clientID,
|
||||
WithContext(context.Background()),
|
||||
WithListenPort(0),
|
||||
WithScopes([]string{"test-scope"}),
|
||||
tt.opt(t),
|
||||
WithLogger(testLogger),
|
||||
)
|
||||
require.Equal(t, tt.wantLogs, testLogger.Lines())
|
||||
if tt.wantErr != "" {
|
||||
require.EqualError(t, err, tt.wantErr)
|
||||
require.Nil(t, tok)
|
||||
|
||||
@@ -1,16 +0,0 @@
apiVersion: v1
clusters:
- cluster:
    server: https://127.0.0.1:8444
    insecure-skip-tls-verify: true
  name: kind-pinniped
contexts:
- context:
    cluster: kind-pinniped
    user: kind-pinniped
  name: kind-pinniped
current-context: kind-pinniped
kind: Config
preferences: {}
users:
- name: kind-pinniped
public/categories/index.xml (new file, 9 lines)
@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title>Categories on </title>
    <link>/categories/</link>
    <description>Recent content in Categories on </description>
    <generator>Hugo -- gohugo.io</generator><atom:link href="/categories/index.xml" rel="self" type="application/rss+xml" />
  </channel>
</rss>

public/index.xml (new file, 9 lines)
@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title></title>
    <link>/</link>
    <description>Recent content on </description>
    <generator>Hugo -- gohugo.io</generator><atom:link href="/index.xml" rel="self" type="application/rss+xml" />
  </channel>
</rss>

public/sitemap.xml (new file, 11 lines)
@@ -0,0 +1,11 @@
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
  xmlns:xhtml="http://www.w3.org/1999/xhtml">
  <url>
    <loc>/</loc>
  </url><url>
    <loc>/categories/</loc>
  </url><url>
    <loc>/tags/</loc>
  </url>
</urlset>

public/tags/index.xml (new file, 9 lines)
@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title>Tags on </title>
    <link>/tags/</link>
    <description>Recent content in Tags on </description>
    <generator>Hugo -- gohugo.io</generator><atom:link href="/tags/index.xml" rel="self" type="application/rss+xml" />
  </channel>
</rss>
site/content/docs/howto/configure-supervisor-with-gitlab.md (new file, 140 lines)
@@ -0,0 +1,140 @@
---
title: Configure the Pinniped Supervisor to use GitLab as an OIDC Provider
description: Set up the Pinniped Supervisor to use GitLab login.
cascade:
  layout: docs
menu:
  docs:
    name: Configure Supervisor With GitLab
    weight: 35
    parent: howtos
---
The Supervisor is an [OpenID Connect (OIDC)](https://openid.net/connect/) issuer that supports connecting a single "upstream" OIDC identity provider to many "downstream" cluster clients.

This guide shows you how to configure the Supervisor so that users can authenticate to their Kubernetes
cluster using their GitLab credentials.

## Prerequisites

This how-to guide assumes that you have already [installed the Pinniped Supervisor]({{< ref "install-supervisor" >}}) with working ingress,
and that you have [configured a `FederationDomain` to issue tokens for your downstream clusters]({{< ref "configure-supervisor" >}}).

## Configure your GitLab Application

Follow the instructions for [using GitLab as an OAuth2 authentication service provider](https://docs.gitlab.com/ee/integration/oauth_provider.html) and create a user, group, or instance-wide application.

For example, to create a user-owned application:

1. In GitLab, navigate to [_User Settings_ > _Applications_](https://gitlab.com/-/profile/applications)
1. Create a new application:
   1. Enter a name for your application, such as "My Kubernetes Clusters".
   1. Enter the redirect URI. This is the `spec.issuer` you configured in your `FederationDomain` appended with `/callback`.
   1. Check the box saying that the application is _Confidential_.
   1. Select scope `openid`. This provides access to the `nickname` (GitLab username) and `groups` (GitLab groups) claims.
   1. Save the application and make note of the _Application ID_ and _Secret_.

## Configure the Supervisor cluster

Create an [OIDCIdentityProvider](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#oidcidentityprovider) in the same namespace as the Supervisor.

For example, this OIDCIdentityProvider and corresponding Secret for [gitlab.com](https://gitlab.com) use the `nickname` claim (GitLab username) as the Kubernetes username:

```yaml
apiVersion: idp.supervisor.pinniped.dev/v1alpha1
kind: OIDCIdentityProvider
metadata:
  namespace: pinniped-supervisor
  name: gitlab
spec:

  # Specify the upstream issuer URL.
  issuer: https://gitlab.com

  # Specify how GitLab claims are mapped to Kubernetes identities.
  claims:

    # Specify the name of the claim in your GitLab token that will be mapped
    # to the "username" claim in downstream tokens minted by the Supervisor.
    username: nickname

    # Specify the name of the claim in GitLab that represents the groups
    # that the user belongs to. Note that GitLab's "groups" claim comes from
    # their "/userinfo" endpoint, not the token.
    groups: groups

  # Specify the name of the Kubernetes Secret that contains your GitLab
  # application's client credentials (created below).
  client:
    secretName: gitlab-client-credentials
---
apiVersion: v1
kind: Secret
metadata:
  namespace: pinniped-supervisor
  name: gitlab-client-credentials
type: secrets.pinniped.dev/oidc-client
stringData:

  # The "Application ID" that you got from GitLab.
  clientID: "<your-client-id>"

  # The "Secret" that you got from GitLab.
  clientSecret: "<your-client-secret>"
```

Once your OIDCIdentityProvider has been created, you can validate your configuration by running:

```shell
kubectl describe OIDCIdentityProvider -n pinniped-supervisor gitlab
```

Look at the `status` field. If it was configured correctly, you should see `phase: Ready`.

### (Optional) Use a different GitLab claim for Kubernetes usernames

You can also use other GitLab claims as the username.
To do this, make sure you have configured the appropriate scopes on your GitLab application, such as `email`.

You must also adjust the `spec.authorizationConfig` to request those scopes at login and adjust `spec.claims` to use those claims in Kubernetes, for example:

```yaml
# [...]
spec:
  # Request any scopes other than "openid" that you selected when
  # creating your GitLab application. The "openid" scope is always
  # included.
  #
  # See here for a full list of available claims:
  # https://docs.gitlab.com/ee/integration/openid_connect_provider.html
  authorizationConfig:
    additionalScopes: [ email ]
  claims:
    username: email
    groups: groups
# [...]
```

### (Optional) Use a private GitLab instance

To use a privately hosted instance of GitLab, you can change the `spec.issuer` and `spec.tls.certificateAuthorityData` fields, for example:

```yaml
apiVersion: idp.supervisor.pinniped.dev/v1alpha1
kind: OIDCIdentityProvider
# [...]
spec:
  # Specify your GitLab instance URL.
  issuer: https://gitlab.your-company.example.com

  # Specify the CA bundle for the GitLab server as base64-encoded PEM
  # data. For example, the output of `cat my-ca-bundle.pem | base64`.
  #
  # This is only necessary if your instance uses a custom CA.
  tls:
    certificateAuthorityData: "<gitlab-ca-bundle>"
# [...]
```

## Next Steps

Now that you have configured the Supervisor to use GitLab, you may want to [configure the Concierge to validate JWTs issued by the Supervisor]({{< ref "configure-concierge-jwt" >}}).
@@ -10,98 +10,108 @@ menu:
|
||||
parent: reference
|
||||
---
|
||||
|
||||
## `pinniped version`
|
||||
## pinniped get kubeconfig
|
||||
|
||||
Print the version of this Pinniped CLI.
|
||||
Generate a Pinniped-based kubeconfig for a cluster
|
||||
|
||||
```sh
|
||||
pinniped version [flags]
|
||||
```
|
||||
|
||||
- `-h`, `--help`:
|
||||
|
||||
help for kubeconfig
|
||||
|
||||
## `pinniped get kubeconfig`
|
||||
|
||||
Generate a Pinniped-based kubeconfig for a cluster.
|
||||
|
||||
```sh
|
||||
pinniped get kubeconfig [flags]
|
||||
```
|
||||
|
||||
- `-h`, `--help`:
|
||||
### Options
|
||||
|
||||
help for kubeconfig
|
||||
- `--concierge-api-group-suffix string`:
|
||||
```
|
||||
--concierge-api-group-suffix string Concierge API group suffix (default "pinniped.dev")
|
||||
--concierge-authenticator-name string Concierge authenticator name (default: autodiscover)
|
||||
--concierge-authenticator-type string Concierge authenticator type (e.g., 'webhook', 'jwt') (default: autodiscover)
|
||||
--concierge-ca-bundle path Path to TLS certificate authority bundle (PEM format, optional, can be repeated) to use when connecting to the Concierge
|
||||
--concierge-credential-issuer string Concierge CredentialIssuer object to use for autodiscovery (default: autodiscover)
|
||||
--concierge-endpoint string API base for the Concierge endpoint
|
||||
--concierge-mode mode Concierge mode of operation (default TokenCredentialRequestAPI)
|
||||
--concierge-skip-wait Skip waiting for any pending Concierge strategies to become ready (default: false)
|
||||
-h, --help help for kubeconfig
|
||||
--kubeconfig string Path to kubeconfig file
|
||||
--kubeconfig-context string Kubeconfig context name (default: current active context)
|
||||
--no-concierge Generate a configuration which does not use the Concierge, but sends the credential to the cluster directly
|
||||
--oidc-ca-bundle path Path to TLS certificate authority bundle (PEM format, optional, can be repeated)
|
||||
--oidc-client-id string OpenID Connect client ID (default: autodiscover) (default "pinniped-cli")
|
||||
--oidc-issuer string OpenID Connect issuer URL (default: autodiscover)
|
||||
--oidc-listen-port uint16 TCP port for localhost listener (authorization code flow only)
|
||||
--oidc-request-audience string Request a token with an alternate audience using RFC8693 token exchange
|
||||
--oidc-scopes strings OpenID Connect scopes to request during login (default [offline_access,openid,pinniped:request-audience])
|
||||
--oidc-session-cache string Path to OpenID Connect session cache file
|
||||
--oidc-skip-browser During OpenID Connect login, skip opening the browser (just print the URL)
|
||||
-o, --output string Output file path (default: stdout)
|
||||
--skip-validation Skip final validation of the kubeconfig (default: false)
|
||||
--static-token string Instead of doing an OIDC-based login, specify a static token
|
||||
--static-token-env string Instead of doing an OIDC-based login, read a static token from the environment
|
||||
--timeout duration Timeout for autodiscovery and validation (default 10m0s)
|
||||
```
|
||||
|
||||
Concierge API group suffix (default "pinniped.dev")
|
||||
- `--concierge-authenticator-name string`:
|
||||
### SEE ALSO
|
||||
|
||||
Concierge authenticator name (default: autodiscover)
|
||||
- `--concierge-authenticator-type string`:
|
||||
* [pinniped get]() - get
|
||||
|
||||
Concierge authenticator type (e.g., 'webhook', 'jwt') (default: autodiscover)
|
||||
- `--concierge-ca-bundle path`:
|
||||
## pinniped help
|
||||
|
||||
  Path to TLS certificate authority bundle (PEM format, optional, can be repeated) to use when connecting to the Concierge

- `--concierge-credential-issuer string`:

  Concierge CredentialIssuer object to use for autodiscovery (default: autodiscover)

- `--concierge-endpoint string`:

  API base for the Concierge endpoint

- `--concierge-mode mode`:

  Concierge mode of operation (default TokenCredentialRequestAPI)

- `--concierge-skip-wait`:

  Skip waiting for any pending Concierge strategies to become ready (default: false)

- `--kubeconfig string`:

  Path to kubeconfig file

- `--kubeconfig-context string`:

  Kubeconfig context name (default: current active context)

- `--no-concierge`:

  Generate a configuration which does not use the Concierge, but sends the credential to the cluster directly

- `--oidc-ca-bundle path`:

  Path to TLS certificate authority bundle (PEM format, optional, can be repeated)

- `--oidc-client-id string`:

  OpenID Connect client ID (default: autodiscover) (default "pinniped-cli")

- `--oidc-issuer string`:

  OpenID Connect issuer URL (default: autodiscover)

- `--oidc-listen-port uint16`:

  TCP port for localhost listener (authorization code flow only)

- `--oidc-request-audience string`:

  Request a token with an alternate audience using RFC8693 token exchange

- `--oidc-scopes strings`:

  OpenID Connect scopes to request during login (default [offline_access,openid,pinniped:request-audience])

- `--oidc-session-cache string`:

  Path to OpenID Connect session cache file

- `--oidc-skip-browser`:

  During OpenID Connect login, skip opening the browser (just print the URL)

- `-o`, `--output string`:

  Output file path (default: stdout)

- `--skip-validation`:

  Skip final validation of the kubeconfig (default: false)

- `--static-token string`:

  Instead of doing an OIDC-based login, specify a static token

- `--static-token-env string`:

  Instead of doing an OIDC-based login, read a static token from the environment

- `--timeout duration`:

  Timeout for autodiscovery and validation (default 10m0s)

Help about any command

### Synopsis

Help provides help for any command in the application.
Simply type pinniped help [path to command] for full details.

```
pinniped help [command] [flags]
```

### Options

```
  -h, --help   help for help
```

### SEE ALSO

* [pinniped]() - pinniped

## pinniped version

Print the version of this Pinniped CLI

```
pinniped version [flags]
```

### Options

```
  -h, --help   help for version
```

### SEE ALSO

* [pinniped]() - pinniped

## pinniped whoami

Print information about the current user

```
pinniped whoami [flags]
```

### Options

```
      --api-group-suffix string     Concierge API group suffix (default "pinniped.dev")
  -h, --help                        help for whoami
      --kubeconfig string           Path to kubeconfig file
      --kubeconfig-context string   Kubeconfig context name (default: current active context)
  -o, --output string               Output format (e.g., 'yaml', 'json', 'text') (default "text")
```

### SEE ALSO

* [pinniped]() - pinniped
@@ -58,12 +58,4 @@ From contributing code to uploading documentation to sharing how you’d like to

As to where the name “pinniped” comes from - Pinnipeds are marine mammals that have front and rear flippers, such as seals. A “seal” is also a mark of authenticity. And that’s what Pinniped hopes to be: a seal or mark of authenticity across and between Kubernetes clusters.

### Join the Pinniped community

- Follow us on Twitter at [@projectpinniped](https://twitter.com/projectpinniped)
- Join our Kubernetes Slack channel so you can talk to project maintainers and other community members: [#pinniped](https://go.pinniped.dev/community/slack)
- Join our [Google Group](https://go.pinniped.dev/community/group) to get updates on the project and invites to community meetings

Join the [Pinniped Community Meetings](https://go.pinniped.dev/community), which are held every first and third Thursday at 9am PT via [Zoom](https://go.pinniped.dev/community/zoom), and read and comment on the [meeting agenda](https://go.pinniped.dev/community/agenda).

- If you are ready to jump in and test, add code, or help with documentation, follow the instructions on our [Contributing to Pinniped](https://go.pinniped.dev/community) page for guidance.

{{< community >}}
@@ -113,14 +113,7 @@ This would require major changes and it would be challenging to support some fea

As a team, we have no immediate plans for either of these ideas, but if you are interested please [reach out in GitHub][discussion].

## Join the Pinniped Community!

Pinniped is better because of our contributors and maintainers.
It is because of you that we can bring great software to the community.

Please join us during our online community meetings, occurring every first and third Thursday of the month at 9AM PT / 12PM PT.
Use [this Zoom link][zoom] to attend and add any agenda items you wish to discuss to [the notes document][meeting-notes].

Join our [Google Group][google-group] to receive invites to this meeting.

{{< community >}}

[api-aggregation]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
[apiserver-pkg]: https://pkg.go.dev/k8s.io/apiserver/pkg/server
@@ -128,12 +121,10 @@ Join our [Google Group][google-group] to receive invites to this meeting.
[crd]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/
[custom-scheme]: https://github.com/vmware-tanzu/pinniped/blob/main/internal/concierge/server/server.go#L182
[discussion]: https://github.com/vmware-tanzu/pinniped/discussions/386
[google-group]: https://go.pinniped.dev/community/group
[groupsuffix]: https://github.com/vmware-tanzu/pinniped/blob/main/internal/groupsuffix/groupsuffix.go
[ingress-spec]: https://kubernetes.io/docs/reference/kubernetes-api/services-resources/ingress-v1/#IngressSpec
[kubeclient-client]: https://github.com/vmware-tanzu/pinniped/blob/v0.5.0/internal/kubeclient/kubeclient.go#L22
[kubeclient-middleware]: https://github.com/vmware-tanzu/pinniped/blob/v0.5.0/internal/kubeclient/middleware.go#L17-L19
[meeting-notes]: https://go.pinniped.dev/community/agenda
[ownerreferences]: https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#owners-and-dependents
[prepare-controllers]: https://github.com/vmware-tanzu/pinniped/blob/v0.5.0/internal/controllermanager/prepare_controllers.go#L116-L120
[rest-config-wrap]: https://pkg.go.dev/k8s.io/client-go/rest#Config.Wrap
@@ -143,4 +134,3 @@ Join our [Google Group][google-group] to receive invites to this meeting.
[ytt-crd-overlay]: https://github.com/vmware-tanzu/pinniped/blob/v0.5.0/deploy/concierge/z0_crd_overlay.yaml
[ytt-deployment]: https://github.com/vmware-tanzu/pinniped/blob/v0.5.0/deploy/concierge/deployment.yaml#L195
[ytt]: https://carvel.dev/ytt/
[zoom]: https://go.pinniped.dev/community/zoom
163 site/content/posts/2021-04-01-concierge-on-managed-clusters.md Normal file
@@ -0,0 +1,163 @@
---
title: "Pinniped v0.7.0: Enabling multi-cloud, multi-provider Kubernetes"
slug: bringing-the-concierge-to-more-clusters
date: 2021-04-01
author: Matt Moyer
image: https://images.unsplash.com/photo-1525125804400-4b77d2bc5ada?ixlib=rb-1.2.1&ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&auto=format&fit=crop&w=1674&q=80
excerpt: "With the release of v0.7.0, Pinniped now supports a much wider range of real-world Kubernetes clusters, including managed Kubernetes environments on all major cloud providers."
tags: ['Matt Moyer', 'release']
---



*Photo by [Fred Heap](https://unsplash.com/@fred_heap) on [Unsplash](https://unsplash.com/s/photos/seal)*

Pinniped is a "batteries included" authentication system for Kubernetes clusters.
With the release of v0.7.0, Pinniped now supports a much wider range of real-world Kubernetes clusters, including managed Kubernetes environments on all major cloud providers.

This post describes how v0.7.0 fits into Pinniped's quest to bring a smooth, unified login experience to all Kubernetes clusters.

## Authentication in Kubernetes

Kubernetes includes a pluggable authentication system right out of the box.
While it doesn't have an end-to-end login flow for users, it does support [many ways][kube-authn] to authenticate individual requests.
These include JSON Web Tokens (JWTs), x509 client certificates, and opaque bearer tokens validated by an external webhook.

As a cluster administrator, you can configure these options by passing the appropriate command-line flags to the `kube-apiserver` process.
For example, to configure x509 client certificates, you must set the `--client-ca-file` flag to reference an x509 certificate authority bundle.
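To make the x509 option concrete, here is a minimal Go sketch (illustrative only, not Pinniped or kube-apiserver code; the file paths are placeholders) of validating a client certificate against a `--client-ca-file` style CA bundle. Kubernetes derives the username from the certificate's Common Name and the group memberships from its Organization values.

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	// CA bundle that would be passed to kube-apiserver via --client-ca-file.
	caPEM, err := os.ReadFile("client-ca.pem") // placeholder path
	if err != nil {
		panic(err)
	}
	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(caPEM) {
		panic("no CA certificates found in bundle")
	}

	// A client certificate as it would be presented during the TLS handshake.
	clientPEM, err := os.ReadFile("client-cert.pem") // placeholder path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(clientPEM)
	if block == nil {
		panic("no PEM block found in client certificate file")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}

	// Verify the chain and require the "client auth" extended key usage.
	if _, err := cert.Verify(x509.VerifyOptions{
		Roots:     roots,
		KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	}); err != nil {
		panic(err)
	}

	// Kubernetes maps the certificate subject to an identity:
	// CommonName becomes the username, Organization values become group names.
	fmt.Println("user:  ", cert.Subject.CommonName)
	fmt.Println("groups:", cert.Subject.Organization)
}
```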
If you are hand-crafting a Kubernetes installation or building a custom distribution, you can use these options to integrate Kubernetes into your existing identity infrastructure.

However, in many real-world scenarios your options are more limited:

- If you run your clusters using managed services such as Amazon Elastic Kubernetes Service (EKS), Azure Kubernetes Service (AKS), or Google Kubernetes Engine (GKE), you won't have access to set the required flags.
  These cloud providers don't allow cluster administrators to set arbitrary API server command-line flags, so you must use their respective built-in identity systems.

- Even if you build and install your own Kubernetes clusters, changing `kube-apiserver` flags requires reconfiguring and restarting the cluster control plane.
  This can be a daunting task if you have dozens or hundreds of disparate existing clusters spread across an enterprise.

Pinniped closes these gaps by enabling _dynamic reconfiguration_ of Kubernetes authentication on _existing clusters_.
This empowers cluster administrators to unify cluster login flows across all their clusters, even when they span multiple clouds and providers.

## The Concierge

The Pinniped [_Concierge_]({{< ref "docs/howto/install-concierge.md" >}}) component implements cluster-level authentication.
It runs on each Kubernetes cluster to enable Pinniped-based logins on that cluster.
When a new user arrives, the Concierge server verifies the user's external identity and helps them access the cluster.

The design of the Concierge supports multiple backend _strategies_.
Each strategy helps Pinniped integrate with some class of Kubernetes clusters.

### Concierge before v0.7.0

Although the Concierge design allows for multiple strategies, before v0.7.0 there was only one: `KubeClusterSigningCertificate`.

When the Concierge starts, the `KubeClusterSigningCertificate` strategy:

1. Looks for a `kube-controller-manager` pod in the `kube-system` namespace.
   If it finds no such pod, it marks the strategy as failed.

1. Creates a "kube cert agent" pod running in the Concierge namespace.
   This pod has all the same [node selectors][nodeselector], [tolerations][tolerations], and [host volume mounts][hostpath] as the original `kube-controller-manager` pod, but simply runs a `sleep` command.

1. Uses the pod `exec` API to connect and run `cat`.
   Using this technique, it reads both the cluster signing certificate (`--cluster-signing-cert-file`) and key (`--cluster-signing-key-file`) and loads them into an in-memory certificate signer in the main Concierge process.
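The third step relies only on the standard Kubernetes pod `exec` API. A rough client-go sketch of that technique (not the actual Concierge implementation; the namespace, pod name, and file path arguments are whatever the caller supplies) might look like this:

```go
package sketch

import (
	"bytes"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// readFileViaExec runs `cat <path>` inside the named pod and returns the file
// contents. This is how a signing cert/key that is host-mounted into the cert
// agent pod can be read without exposing it over the network.
func readFileViaExec(config *rest.Config, client kubernetes.Interface, namespace, pod, path string) ([]byte, error) {
	req := client.CoreV1().RESTClient().
		Post().
		Namespace(namespace).
		Resource("pods").
		Name(pod).
		SubResource("exec").
		VersionedParams(&corev1.PodExecOptions{
			Command: []string{"cat", path},
			Stdout:  true,
			Stderr:  true,
		}, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		return nil, err
	}

	var stdout, stderr bytes.Buffer
	if err := exec.Stream(remotecommand.StreamOptions{
		Stdout: &stdout,
		Stderr: &stderr,
	}); err != nil {
		return nil, err
	}
	return stdout.Bytes(), nil
}
```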
Later, when a user runs `kubectl`:

1. The `kubectl` process invokes the Pinniped ExecCredential plugin.
   The plugin code obtains the user's external credential, then sends a [TokenCredentialRequest][tcr] to the cluster's [aggregated API server][api-aggregation] endpoint.

1. The TokenCredentialRequest handler in the Concierge validates the user's external credential.
   Once it has authenticated the user, it uses the cluster signing certificate to issue and return a short-lived client certificate encoding the user's identity (a rough sketch of this issuance step follows the list).
   This certificate is valid for five minutes.

1. The plugin code passes the short-lived certificate back to `kubectl`, which makes its authenticated API requests to the Kubernetes API server using the temporary client certificate.
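For intuition, the certificate issuance in step 2 can be sketched with Go's standard library. This is a simplified illustration under the assumption of an in-memory CA, not the Concierge's actual code; a real implementation would also return the generated private key alongside the certificate.

```go
package sketch

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"time"
)

// issueShortLivedClientCert signs a five-minute client certificate for the
// given user and groups, using the cluster signing CA that was read earlier.
func issueShortLivedClientCert(caCert *x509.Certificate, caKey crypto.Signer, user string, groups []string) ([]byte, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}
	now := time.Now()
	template := &x509.Certificate{
		SerialNumber: big.NewInt(now.UnixNano()),
		Subject: pkix.Name{
			CommonName:   user,   // Kubernetes reads the username from CN...
			Organization: groups, // ...and the group names from O.
		},
		NotBefore:   now.Add(-1 * time.Minute),
		NotAfter:    now.Add(5 * time.Minute), // short-lived, as described above
		KeyUsage:    x509.KeyUsageDigitalSignature,
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	}
	der, err := x509.CreateCertificate(rand.Reader, template, caCert, &key.PublicKey, caKey)
	if err != nil {
		return nil, err
	}
	return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der}), nil
}
```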
This strategy works on clusters where the `kube-controller-manager` runs as a normal pod on a schedulable cluster node.
This includes many real-world clusters, including those created by [kubeadm][kubeadm].

It has little or no performance overhead because Pinniped isn't directly in the request path.
Because all the interactions between the client and the Concierge happen via Kubernetes API aggregation, it doesn't require any additional ingress or external load balancer support.
This also makes it great for simple use cases such as [kind][kind].

However, it comes with one big caveat: it doesn't support any of the most popular managed Kubernetes services.

### Adding support for managed clusters

On popular managed Kubernetes services, the Kubernetes control plane isn't accessible to the usual cluster administrator.
This requires a new strategy: `ImpersonationProxy`.

When the Concierge starts, the `ImpersonationProxy` strategy:

1. Looks for nodes labeled as control plane nodes.
   If it finds any, it puts itself in an inactive state as it's not needed.

1. Starts serving an HTTPS endpoint on TCP port 8444.
   This endpoint serves as an _impersonating proxy_ for the Kubernetes API (more details on this below).

1. Creates a Service of `type: LoadBalancer` and waits for the cloud provider to assign it an external hostname or IP address (see the sketch after this list).

1. Issues an x509 certificate authority and serving certificates for the external endpoint.
   Clients use this certificate authority to verify connections to the impersonation proxy.

1. Issues an x509 certificate authority for issuing client certificates.
   This client CA isn't trusted by Kubernetes but is trusted by the impersonation proxy handler.
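Step 3 uses ordinary Kubernetes Service machinery. Here is a hedged client-go sketch of creating such a LoadBalancer Service and waiting for the cloud provider to fill in an address; the Service name and selector labels are invented for illustration and are not the Concierge's real resource names.

```go
package sketch

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// createImpersonationLoadBalancer creates a LoadBalancer Service in front of the
// impersonation proxy's HTTPS port and waits until the cloud provider assigns an
// external hostname or IP address.
func createImpersonationLoadBalancer(ctx context.Context, client kubernetes.Interface, namespace string) (string, error) {
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "impersonation-proxy-load-balancer"}, // hypothetical name
		Spec: corev1.ServiceSpec{
			Type:     corev1.ServiceTypeLoadBalancer,
			Selector: map[string]string{"app": "pinniped-concierge"}, // hypothetical selector
			Ports: []corev1.ServicePort{{
				Port:       443,
				TargetPort: intstr.FromInt(8444), // the HTTPS port described in step 2
			}},
		},
	}
	if _, err := client.CoreV1().Services(namespace).Create(ctx, svc, metav1.CreateOptions{}); err != nil {
		return "", err
	}

	// Poll until the cloud provider fills in the Service's external address.
	var endpoint string
	err := wait.PollImmediate(5*time.Second, 10*time.Minute, func() (bool, error) {
		current, err := client.CoreV1().Services(namespace).Get(ctx, svc.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, ingress := range current.Status.LoadBalancer.Ingress {
			if ingress.Hostname != "" {
				endpoint = ingress.Hostname
				return true, nil
			}
			if ingress.IP != "" {
				endpoint = ingress.IP
				return true, nil
			}
		}
		return false, nil
	})
	if err != nil {
		return "", fmt.Errorf("load balancer was not provisioned: %w", err)
	}
	return endpoint, nil
}
```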
Later, when a user runs `kubectl`:

1. As before, the `kubectl` process invokes the Pinniped ExecCredential plugin (part of the `pinniped` command-line tool).
   The plugin code obtains the user's external credential, then makes a [TokenCredentialRequest][tcr].
   This request happens as an anonymous request to the impersonation proxy endpoint.

1. The TokenCredentialRequest handler in the Concierge validates the user's external credentials.
   Once it has authenticated the user, it uses the _Pinniped_ client signing certificate to issue and return a short-lived (5m) client certificate encoding the user's identity.
   This certificate is only valid when presented to the impersonation proxy, not when presented directly to the real Kubernetes API server.

1. The plugin code passes the short-lived certificate back to `kubectl`.
   Unlike before, the kubeconfig now points at the impersonation proxy endpoint.

1. The impersonation proxy receives the incoming request from `kubectl` and authenticates it via the client certificate.
   Once it knows the user's identity, it impersonates the authenticated user by adding [`Impersonate-` headers][impersonation].
   It forwards the impersonating request to the real Kubernetes API server and proxies the response back to the user.
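The impersonation mechanism in the last step is the same one that client-go exposes to any caller with RBAC permission to impersonate. This sketch (not Concierge code) shows client-go setting the equivalent `Impersonate-User` and `Impersonate-Group` headers on each request via `rest.ImpersonationConfig`:

```go
package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// listPodsAsUser performs a request while impersonating the given user and groups.
// client-go translates the Impersonate field into the same "Impersonate-User" and
// "Impersonate-Group" HTTP headers that the impersonation proxy adds on behalf of
// its authenticated users.
func listPodsAsUser(ctx context.Context, base *rest.Config, user string, groups []string) error {
	cfg := rest.CopyConfig(base)
	cfg.Impersonate = rest.ImpersonationConfig{
		UserName: user,
		Groups:   groups,
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	_, err = client.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
	return err
}
```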
This strategy works on any conformant cluster with working LoadBalancer service support.
It has some disadvantages, namely the overhead involved in proxying requests and the extra setup time required to provision a LoadBalancer service.

## Conclusion and future work

Pinniped now supports a large majority of real-world Kubernetes clusters!
Our automated test suite ensures that Pinniped is stable and functional across a wide range of Kubernetes versions and several providers including EKS, AKS, and GKE.

This is a great start but there are more strategies left to build:

- A strategy that loads the cluster signing certificate/key directly from a Secret (for example, as it appears in OpenShift).

- A strategy that takes advantage of future CertificateSigningRequest API enhancements that support short-lived certificates (see [kubernetes/kubernetes#99494][csr-notafter]).

- A strategy that issues non-certificate credentials, such as if a cluster has been statically configured to trust a JWT issuer.

The current implementation also has a few missing features:

- There is no support for "nested" impersonation.
  This means you can't use the `--as` or `--as-group` flags in `kubectl` when you're connecting through the impersonation proxy.

- It only supports certificate-based authentication.
  You can't authenticate to the impersonation proxy directly with a ServiceAccount token, for example.

- Depending on your cloud provider's LoadBalancer implementation, you may experience timeouts in long idle requests.
  For example, a `kubectl logs` command for a quiet app may exit after as few as four minutes of silence.

We invite your suggestions and contributions to make Pinniped work across all flavors of Kubernetes.

{{< community >}}

[api-aggregation]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
[csr-notafter]: https://github.com/kubernetes/kubernetes/pull/99494
[hostpath]: https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
[impersonation]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation
[kind]: https://kind.sigs.k8s.io/
[kube-authn]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/
[kubeadm]: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/
[nodeselector]: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
[tcr]: https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#tokencredentialrequest
[tolerations]: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
@@ -37,7 +37,8 @@ HUGO_ENABLEGITINFO = "true"
[[headers]]
  for = "/*"
  [headers.values]
    Content-Security-Policy = "default-src 'self'; img-src *"
    # disabled to support docsearch until https://github.com/algolia/instantsearch.js/issues/2868 is fixed.
    # Content-Security-Policy = "default-src 'self'; img-src *"
    X-Content-Type-Options = "nosniff"
    X-Frame-Options = "DENY"
    X-XSS-Protection = "1; mode=block"
@@ -15,10 +15,19 @@
  {{ with .OutputFormats.Get "RSS" -}}
  {{ printf `<link rel="%s" type="%s" href="%s" title="%s">` .Rel .MediaType.Type .RelPermalink $.Site.Title | safeHTML }}
  {{- end }}
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css" />
</head>
<body>
  {{ partial "header" . }}
  {{ block "main" . }}{{ end }}
  {{ partial "footer" . }}
  <script type="text/javascript" src="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js"></script>
  <script type="text/javascript"> docsearch({
    apiKey: '8f37841a145124eb42a0e3249d49a3c5',
    indexName: 'pinniped',
    inputSelector: '.docsearch-input',
    debug: false // Set debug to true if you want to inspect the dropdown
  });
  </script>
</body>
</html>
@@ -7,7 +7,7 @@
  </div>
  <div class="wrapper blog landing">
    <div class="grid three">
      {{ range (.Paginator 9).Pages.ByDate }}
      {{ range (.Paginator 9).Pages.ByDate.Reverse }}
        {{ partial "blog-post-card.html" . }}
      {{ end }}
    </div>
@@ -1,4 +1,12 @@
<div class="side-nav">
  <form class="d-flex align-items-center">
    <span class="algolia-autocomplete" style="position: relative; display: inline-block; direction: ltr;">
      <input type="search" class="form-control docsearch-input" id="search-input" placeholder="Search..."
             aria-label="Search for..." autocomplete="off" spellcheck="false" role="combobox"
             aria-autocomplete="list" aria-expanded="false" aria-owns="algolia-autocomplete-listbox-0"
             dir="auto" style="position: relative; vertical-align: top;">
    </span>
  </form>
  <ul>
    {{- $currentPage := . }}
    {{- range .Site.Menus.docs }}
14 site/themes/pinniped/layouts/shortcodes/community.html Normal file
@@ -0,0 +1,14 @@
<div class="join-the-community">
  <h3>Join the Pinniped community!</h3>
  <p>
    Pinniped is better because of our contributors and maintainers.
    It's because of you that we can bring great software to the community.
  </p>
  <p>
    Please join us during our online community meetings, occurring every first and third Thursday of the month at 9 AM PT / 12 PM ET.
    Use <a href="https://go.pinniped.dev/community/zoom">this Zoom link</a> to attend and add any agenda items you wish to discuss to <a href="https://go.pinniped.dev/community/agenda">the notes document</a>.
  </p>
  <p>
    Join our <a href="https://go.pinniped.dev/community/group">Google Group</a> to receive invites to this meeting.
  </p>
</div>
29 site/themes/pinniped/static/img/ok-amba.svg Normal file
@@ -0,0 +1,29 @@
<?xml version="1.0" encoding="utf-8"?>
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 104 44" style="enable-background:new 0 0 104 44;" xml:space="preserve" shape-rendering="geometricPrecision">
<path style="fill:#DB001B;" d="M45.9,40.8c0.4,0,0.7-0.2,1-0.3c0.3-0.1,0.6-0.3,0.9-0.6c0.3-0.2,0.5-0.5,0.7-0.8
c0.5-0.6,0.7-1.3,1-1.9c0.1-0.3,0.3-0.7,0.3-1.1l7-28.9C57,6.7,57.1,6.1,57,5.6C57,5.3,57,5,56.8,4.8c-0.1-0.3-0.3-0.5-0.4-0.7
c-0.1-0.1-0.3-0.3-0.4-0.4c-0.3-0.3-0.8-0.4-1.2-0.4L55.1,2h19.7l-0.3,1.5c-0.3,0-0.7,0.1-1,0.1c-0.3,0.1-0.6,0.2-0.9,0.4
c-0.2,0.1-0.4,0.3-0.6,0.5c-0.2,0.2-0.4,0.4-0.5,0.6c-0.1,0.2-0.3,0.4-0.4,0.7C71.1,6,71,6.2,71,6.5l-3.4,13.7L85.8,5.8
c0.2-0.1,0.3-0.3,0.5-0.5c0.1-0.2,0.3-0.3,0.4-0.5c0.2-0.3,0.2-0.6,0.1-0.9c-0.1-0.1-0.1-0.2-0.2-0.3c-0.2-0.2-0.4-0.3-0.6-0.3
c-0.1,0-0.2,0-0.3,0L85.9,2H103l-0.3,1.4c-0.4,0.1-0.9,0.2-1.3,0.3c-0.4,0.1-0.9,0.3-1.3,0.5c-0.5,0.2-1,0.5-1.4,0.8
c-0.5,0.3-0.9,0.6-1.3,0.9l-16,12.8l12.3,16.8c0.4,0.5,0.8,1.1,1.2,1.6c0.4,0.5,0.8,1,1.3,1.5c0.3,0.3,0.5,0.5,0.8,0.8
c0.3,0.2,0.5,0.5,0.8,0.7c0.2,0.2,0.4,0.3,0.6,0.4c0.2,0.1,0.4,0.2,0.7,0.3l-0.3,1.1c-2.8,0.7-5.6,1-8.4,0.9
c-1.1-0.1-2.1-0.2-3.1-0.3c-0.8-0.1-1.6-0.3-2.3-0.5c-0.7-0.3-1.5-0.6-2.2-0.9c-0.4-0.2-0.8-0.5-1.1-0.7c-0.4-0.3-0.7-0.6-1-0.9
c-0.3-0.3-0.7-0.7-1-1.1c-0.3-0.4-0.6-0.8-0.9-1.2L67.4,21l-3.9,16c-0.1,0.6-0.2,1.2-0.1,1.8c0,0.2,0,0.3,0.1,0.5
c0.1,0.2,0.2,0.3,0.3,0.4c0.1,0.1,0.2,0.3,0.3,0.3c0.2,0.2,0.5,0.3,0.7,0.4c0.3,0.1,0.5,0.3,0.8,0.3L65.4,42H45.7L45.9,40.8z"/>
<path style="fill:#DB001B;" d="M21.6,36c1.5,0.1,3.1-0.1,4.6-0.6c0.5-0.2,1-0.4,1.4-0.6c0.6-0.3,1.2-0.7,1.7-1.1
c0.5-0.4,1-0.9,1.4-1.5c0.6-0.8,1.1-1.6,1.6-2.5c0.5-0.9,0.8-1.7,1.2-2.7c0.3-1,0.6-2,0.9-3c0.3-1,0.5-2,0.7-3
c0.3-1.7,0.5-3.4,0.6-5.1c0-1.1-0.1-2.2-0.4-3.3c-0.2-0.5-0.4-1-0.6-1.5c-0.3-0.5-0.6-0.9-1-1.2c-0.5-0.5-1.1-0.9-1.7-1.2
c-0.6-0.3-1.3-0.5-2-0.7C29.3,8,28.6,8,27.8,8c-1.5,0-3,0.3-4.4,0.9c-0.7,0.3-1.3,0.6-2,1.1c-0.5,0.4-1,0.8-1.4,1.2
c-0.4,0.4-0.8,0.9-1.1,1.5c-0.5,0.8-0.9,1.6-1.3,2.5c-0.4,0.8-0.7,1.7-1,2.6c-0.3,0.9-0.6,1.9-0.8,2.8c-0.4,1.6-0.8,3.1-1,4.7
c-0.1,1.1-0.3,2.2-0.2,3.2c0.1,0.9,0.2,1.9,0.5,2.8c0.1,0.4,0.3,0.9,0.6,1.3c0.3,0.5,0.6,0.9,1,1.3c0.4,0.4,0.8,0.8,1.3,1
c0.6,0.3,1.1,0.6,1.8,0.8C20.3,35.9,20.9,36,21.6,36z M1.5,19.7c0.2-1.1,0.6-2.3,1-3.4c0.4-1,0.8-2,1.4-2.9c0.5-0.9,1.2-1.8,1.8-2.7
c0.8-0.9,1.6-1.8,2.4-2.6c0.9-0.8,1.8-1.5,2.8-2.2c0.9-0.6,1.9-1.2,2.8-1.7c1-0.5,2-1,3-1.3c1-0.4,1.9-0.7,2.9-0.9
c2.6-0.7,5.4-1,8-1c2.4,0,4.8,0.2,7.2,1c0.8,0.2,1.6,0.5,2.3,0.8c0.8,0.3,1.5,0.7,2.2,1.1c0.8,0.4,1.5,0.9,2.2,1.4
c0.7,0.5,1.4,1.1,2,1.6c0.6,0.6,1.2,1.2,1.8,1.9c0.6,0.6,1.1,1.4,1.5,2.1c0.5,0.7,0.9,1.5,1.2,2.3c0.6,1.5,1,3.1,1.2,4.7
c0.2,1.8,0.2,3.7-0.1,5.6c-0.1,0.9-0.3,1.7-0.5,2.5c-0.2,0.8-0.5,1.6-0.9,2.4c-0.5,1.2-1.2,2.4-1.9,3.5c-0.7,1.1-1.5,2.1-2.4,3.1
c-0.9,1-1.9,1.9-2.9,2.7s-2.2,1.5-3.4,2.1c-1.1,0.5-2.2,1-3.3,1.4c-2.4,0.9-4.9,1.4-7.3,1.7c-2.9,0.3-5.9,0.3-8.8-0.2
C17,42.5,16,42.3,15.1,42c-0.9-0.3-1.8-0.6-2.7-1c-0.9-0.4-1.7-0.8-2.6-1.3c-0.8-0.5-1.6-1-2.3-1.7c-0.7-0.6-1.4-1.2-2-1.9
c-0.6-0.7-1.2-1.4-1.7-2.2c-0.4-0.7-0.8-1.4-1.2-2.1c-0.6-1.2-1-2.5-1.3-3.8C1.1,26.5,1,24.9,1,23.3C1.1,22.1,1.2,20.9,1.5,19.7z"/>
</svg>
|
After Width: | Height: | Size: 3.2 KiB |
@@ -1,11 +1,11 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cert-issuer
  namespace: dex
  namespace: tools
  labels:
    app: cert-issuer
---
@@ -13,7 +13,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cert-issuer
  namespace: dex
  namespace: tools
  labels:
    app: cert-issuer
rules:
@@ -25,13 +25,13 @@ kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cert-issuer
  namespace: dex
  namespace: tools
  labels:
    app: cert-issuer
subjects:
- kind: ServiceAccount
  name: cert-issuer
  namespace: dex
  namespace: tools
roleRef:
  kind: Role
  name: cert-issuer
@@ -41,7 +41,7 @@ apiVersion: batch/v1
kind: Job
metadata:
  name: cert-issuer
  namespace: dex
  namespace: tools
  labels:
    app: cert-issuer
spec:
@@ -71,11 +71,21 @@ spec:
            -ca ca.pem -ca-key ca-key.pem \
            -config /tmp/cfssl-default.json \
            -profile www \
            -cn "dex.dex.svc.cluster.local" \
            -hostname "dex.dex.svc.cluster.local" \
            -cn "dex.tools.svc.cluster.local" \
            -hostname "dex.tools.svc.cluster.local" \
            /tmp/csr.json \
            | cfssljson -bare dex

          echo "generating LDAP server certificate..."
          cfssl gencert \
            -ca ca.pem -ca-key ca-key.pem \
            -config /tmp/cfssl-default.json \
            -profile www \
            -cn "ldap.tools.svc.cluster.local" \
            -hostname "ldap.tools.svc.cluster.local" \
            /tmp/csr.json \
            | cfssljson -bare ldap

          chmod -R 777 /var/certs

          echo "generated certificates:"
@@ -90,12 +100,12 @@ spec:
        args:
        - -c
        - |
          kubectl get secrets -n dex certs -o jsonpath='created: {.metadata.creationTimestamp}' || \
          kubectl create secret generic certs --from-file=/var/certs
          kubectl get secrets -n tools certs -o jsonpath='created: {.metadata.creationTimestamp}' || \
          kubectl create secret generic -n tools certs --from-file=/var/certs
        volumeMounts:
        - name: certs
          mountPath: /var/certs
      volumes:
      - name: certs
        emptyDir: {}
      restartPolicy: Never
      restartPolicy: Never
@@ -6,7 +6,7 @@
#@ load("@ytt:yaml", "yaml")

#@ def dexConfig():
issuer: https://dex.dex.svc.cluster.local/dex
issuer: https://dex.tools.svc.cluster.local/dex
storage:
  type: sqlite3
  config:
@@ -36,19 +36,12 @@ staticPasswords:
  userID: "061d23d1-fe1e-4777-9ae9-59cd12abeaaa"
#@ end

---
apiVersion: v1
kind: Namespace
metadata:
  name: dex
  labels:
    name: dex
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: dex-config
  namespace: dex
  namespace: tools
  labels:
    app: dex
data:
@@ -58,7 +51,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
  name: dex
  namespace: dex
  namespace: tools
  labels:
    app: dex
spec:
@@ -102,7 +95,7 @@ apiVersion: v1
kind: Service
metadata:
  name: dex
  namespace: dex
  namespace: tools
  labels:
    app: dex
spec:
Some files were not shown because too many files have changed in this diff.