Merge branch 'main' of github.com:vmware-tanzu/pinniped into active-directory-identity-provider

Margo Crawford
2021-08-26 16:21:08 -07:00
22 changed files with 511 additions and 111 deletions

View File

@@ -37,7 +37,8 @@ import (
"go.pinniped.dev/test/testlib/browsertest"
)
func TestCLIGetKubeconfigStaticToken(t *testing.T) {
// safe to run in parallel with serial tests since it only interacts with a test local webhook, see main_test.go.
func TestCLIGetKubeconfigStaticToken_Parallel(t *testing.T) {
env := testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
// Create a test webhook configuration to use with the CLI.

View File

@@ -17,7 +17,8 @@ import (
"go.pinniped.dev/test/testlib"
)
func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
// Never run this test in parallel since breaking discovery is disruptive, see main_test.go.
func TestAPIServingCertificateAutoCreationAndRotation_Disruptive(t *testing.T) {
env := testlib.IntegrationEnv(t)
defaultServingCertResourceName := env.ConciergeAppName + "-api-tls-serving-certificate"

View File

@@ -22,7 +22,8 @@ import (
"go.pinniped.dev/test/testlib"
)
func TestUnsuccessfulCredentialRequest(t *testing.T) {
// TCRs are non-mutating and safe to run in parallel with serial tests, see main_test.go.
func TestUnsuccessfulCredentialRequest_Parallel(t *testing.T) {
env := testlib.IntegrationEnv(t).WithCapability(testlib.AnonymousAuthenticationSupported)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
@@ -44,7 +45,8 @@ func TestUnsuccessfulCredentialRequest(t *testing.T) {
require.Equal(t, "authentication failed", *response.Status.Message)
}
func TestSuccessfulCredentialRequest(t *testing.T) {
// TCRs are non-mutating and safe to run in parallel with serial tests, see main_test.go.
func TestSuccessfulCredentialRequest_Parallel(t *testing.T) {
env := testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
ctx, cancel := context.WithTimeout(context.Background(), 6*time.Minute)
@@ -129,7 +131,8 @@ func TestSuccessfulCredentialRequest(t *testing.T) {
}
}
func TestFailedCredentialRequestWhenTheRequestIsValidButTheTokenDoesNotAuthenticateTheUser(t *testing.T) {
// TCRs are non-mutating and safe to run in parallel with serial tests, see main_test.go.
func TestFailedCredentialRequestWhenTheRequestIsValidButTheTokenDoesNotAuthenticateTheUser_Parallel(t *testing.T) {
_ = testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
// Create a testWebhook so we have a legitimate authenticator to pass to the
@@ -149,7 +152,8 @@ func TestFailedCredentialRequestWhenTheRequestIsValidButTheTokenDoesNotAuthentic
require.Equal(t, pointer.StringPtr("authentication failed"), response.Status.Message)
}
func TestCredentialRequest_ShouldFailWhenRequestDoesNotIncludeToken(t *testing.T) {
// TCRs are non-mutating and safe to run in parallel with serial tests, see main_test.go.
func TestCredentialRequest_ShouldFailWhenRequestDoesNotIncludeToken_Parallel(t *testing.T) {
_ = testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
// Create a testWebhook so we have a legitimate authenticator to pass to the

View File

@@ -948,9 +948,10 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "ignored-but-required",
Image: "busybox",
Command: []string{"sh", "-c", "sleep 3600"},
Name: "sleeper",
Image: env.ShellContainerImage,
ImagePullPolicy: corev1.PullIfNotPresent,
Command: []string{"sh", "-c", "sleep 3600"},
},
},
ServiceAccountName: saName,
@@ -1064,7 +1065,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
// existing Concierge pod because we need more tools than we can get from a scratch/distroless base image.
runningTestPod := testlib.CreatePod(ctx, t, "impersonation-proxy", env.ConciergeNamespace, corev1.PodSpec{Containers: []corev1.Container{{
Name: "impersonation-proxy-test",
Image: "debian:10.10-slim",
Image: env.ShellContainerImage,
ImagePullPolicy: corev1.PullIfNotPresent,
Command: []string{"bash", "-c", `while true; do read VAR; echo "VAR: $VAR"; done`},
Stdin: true,

View File

@@ -93,7 +93,8 @@ func findSuccessfulStrategy(credentialIssuer *conciergev1alpha.CredentialIssuer,
return nil
}
func TestLegacyPodCleaner(t *testing.T) {
// safe to run in parallel with serial tests since it only interacts with a test local pod, see main_test.go.
func TestLegacyPodCleaner_Parallel(t *testing.T) {
env := testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
@@ -117,9 +118,10 @@ func TestLegacyPodCleaner(t *testing.T) {
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "sleeper",
Image: "debian:10.9-slim",
Command: []string{"/bin/sleep", "infinity"},
Name: "sleeper",
Image: env.ShellContainerImage,
ImagePullPolicy: corev1.PullIfNotPresent,
Command: []string{"/bin/sleep", "infinity"},
}},
},
}, metav1.CreateOptions{})

View File

@@ -15,7 +15,7 @@ import (
// Smoke test to see if the kubeconfig works and the cluster is reachable.
func TestGetNodes(t *testing.T) {
testlib.SkipUnlessIntegration(t)
_ = testlib.IntegrationEnv(t)
cmd := exec.Command("kubectl", "get", "nodes")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr

View File

@@ -24,7 +24,10 @@ import (
"go.pinniped.dev/test/testlib/browsertest"
)
func TestFormPostHTML(t *testing.T) {
// safe to run in parallel with serial tests since it only interacts with a test local server, see main_test.go.
func TestFormPostHTML_Parallel(t *testing.T) {
_ = testlib.IntegrationEnv(t)
// Run a mock callback handler, simulating the one running in the CLI.
callbackURL, expectCallback := formpostCallbackServer(t)
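
The formpostCallbackServer helper itself is not shown in this diff; as a rough illustration of the pattern it implies (a test-local HTTP server standing in for the CLI's callback listener), a generic sketch might look like the following. The name mockCallbackServer and its behavior of simply returning the raw POST body are assumptions for illustration only; the real helper likely asserts on specific form_post fields.

package integration

import (
	"io"
	"net/http"
	"net/http/httptest"
	"testing"
)

// mockCallbackServer captures whatever is POSTed to the callback URL and
// hands it back to the test over a channel.
func mockCallbackServer(t *testing.T) (callbackURL string, expectCallback func() string) {
	t.Helper()
	results := make(chan string, 1)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		results <- string(body)
	}))
	t.Cleanup(server.Close)
	return server.URL, func() string { return <-results }
}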

View File

@@ -24,7 +24,8 @@ import (
"go.pinniped.dev/test/testlib"
)
func TestLDAPSearch(t *testing.T) {
// safe to run in parallel with serial tests since it only makes read requests to our test LDAP server, see main_test.go.
func TestLDAPSearch_Parallel(t *testing.T) {
// This test does not interact with Kubernetes itself. It is a test of our LDAP client code, and only interacts
// with our test OpenLDAP server, which is exposed directly to this test via kubectl port-forward.
// Theoretically we should always be able to run this test, but something about the kubectl port forwarding

View File

@@ -6,7 +6,6 @@ package integration
import (
"context"
"encoding/json"
"errors"
"testing"
"time"
@@ -28,19 +27,18 @@ import (
"go.pinniped.dev/test/testlib"
)
func TestLeaderElection(t *testing.T) {
// safe to run in parallel with serial tests since it only interacts with a test local lease, see main_test.go.
func TestLeaderElection_Parallel(t *testing.T) {
_ = testlib.IntegrationEnv(t)
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute)
t.Cleanup(cancel)
leaseName := "leader-election-" + rand.String(5)
namespace := testlib.CreateNamespace(ctx, t, leaseName)
clients := leaderElectionClients(t, namespace, leaseName)
clients, cancels := leaderElectionClients(t, namespace, leaseName)
// the tests below are order dependent to some degree and definitely cannot be run in parallel
@@ -68,9 +66,52 @@ func TestLeaderElection(t *testing.T) {
lease := checkOnlyLeaderCanWrite(ctx, t, namespace, leaseName, clients)
logLease(t, lease)
})
t.Run("stop current leader", func(t *testing.T) {
startLease := waitForIdentity(ctx, t, namespace, leaseName, clients)
startTransitions := *startLease.Spec.LeaseTransitions
startTime := *startLease.Spec.AcquireTime
startLeaderIdentity := *startLease.Spec.HolderIdentity
leaderClient := clients[startLeaderIdentity]
err := runWriteRequest(ctx, leaderClient)
require.NoError(t, err)
// emulate stopping the leader process
cancels[startLeaderIdentity]()
delete(clients, startLeaderIdentity)
testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
err := runWriteRequest(ctx, leaderClient)
requireEventually.ErrorIs(err, leaderelection.ErrNotLeader, "leader should no longer be able to write")
}, time.Minute, time.Second)
if len(clients) > 0 {
finalLease := waitForIdentity(ctx, t, namespace, leaseName, clients)
finalTransitions := *finalLease.Spec.LeaseTransitions
finalTime := *finalLease.Spec.AcquireTime
finalLeaderIdentity := *finalLease.Spec.HolderIdentity
require.Greater(t, finalTransitions, startTransitions)
require.Greater(t, finalTime.UnixNano(), startTime.UnixNano())
require.NotEqual(t, startLeaderIdentity, finalLeaderIdentity, "should have elected new leader")
logLease(t, finalLease)
}
})
t.Run("sanity check write prevention after stopping leader", func(t *testing.T) {
if len(clients) == 0 {
t.Skip("no clients left to check")
}
lease := checkOnlyLeaderCanWrite(ctx, t, namespace, leaseName, clients)
logLease(t, lease)
})
}
func leaderElectionClient(t *testing.T, namespace *corev1.Namespace, leaseName, identity string) *kubeclient.Client {
func leaderElectionClient(t *testing.T, namespace *corev1.Namespace, leaseName, identity string) (*kubeclient.Client, context.CancelFunc) {
t.Helper()
podInfo := &downward.PodInfo{
@@ -119,23 +160,24 @@ func leaderElectionClient(t *testing.T, namespace *corev1.Namespace, leaseName,
leaderCancel()
}()
return client
return client, controllerCancel
}
func leaderElectionClients(t *testing.T, namespace *corev1.Namespace, leaseName string) map[string]*kubeclient.Client {
func leaderElectionClients(t *testing.T, namespace *corev1.Namespace, leaseName string) (map[string]*kubeclient.Client, map[string]context.CancelFunc) {
t.Helper()
count := rand.IntnRange(1, 6)
out := make(map[string]*kubeclient.Client, count)
clients := make(map[string]*kubeclient.Client, count)
cancels := make(map[string]context.CancelFunc, count)
for i := 0; i < count; i++ {
identity := "leader-election-client-" + rand.String(5)
out[identity] = leaderElectionClient(t, namespace, leaseName, identity)
clients[identity], cancels[identity] = leaderElectionClient(t, namespace, leaseName, identity)
}
t.Logf("running leader election client tests with %d clients: %v", len(out), sets.StringKeySet(out).List())
t.Logf("running leader election client tests with %d clients: %v", len(clients), sets.StringKeySet(clients).List())
return out
return clients, cancels
}
func pickRandomLeaderElectionClient(clients map[string]*kubeclient.Client) *kubeclient.Client {
@@ -155,14 +197,17 @@ func waitForIdentity(ctx context.Context, t *testing.T, namespace *corev1.Namesp
testlib.RequireEventuallyWithoutError(t, func() (bool, error) {
lease, err := pickRandomLeaderElectionClient(clients).Kubernetes.CoordinationV1().Leases(namespace.Name).Get(ctx, leaseName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
t.Logf("lease %s/%s does not exist", namespace.Name, leaseName)
return false, nil
}
if err != nil {
return false, err
}
out = lease
t.Logf("lease %s/%s - current leader identity: %s, valid leader identities: %s",
namespace.Name, leaseName, pointer.StringDeref(lease.Spec.HolderIdentity, "<nil>"), identities.List())
return lease.Spec.HolderIdentity != nil && identities.Has(*lease.Spec.HolderIdentity), nil
}, 3*time.Minute, time.Second)
}, 10*time.Minute, 10*time.Second)
return out
}
@@ -209,12 +254,12 @@ func checkOnlyLeaderCanWrite(ctx context.Context, t *testing.T, namespace *corev
} else {
nonLeaders++
requireEventually.Error(err, "non leader client %q should have write error but it was nil", identity)
requireEventually.True(errors.Is(err, leaderelection.ErrNotLeader), "non leader client %q should have write error: %v", identity, err)
requireEventually.ErrorIs(err, leaderelection.ErrNotLeader, "non leader client %q should have write error: %v", identity, err)
}
}
requireEventually.Equal(1, leaders, "did not see leader")
requireEventually.Equal(len(clients)-1, nonLeaders, "did not see non-leader")
}, time.Minute, time.Second)
}, 3*time.Minute, 3*time.Second)
return lease
}
@@ -231,7 +276,7 @@ func forceTransition(ctx context.Context, t *testing.T, namespace *corev1.Namesp
startTime = *startLease.Spec.AcquireTime
startLease = startLease.DeepCopy()
startLease.Spec.HolderIdentity = pointer.String("some-other-client" + rand.String(5))
startLease.Spec.HolderIdentity = pointer.String("some-other-client-" + rand.String(5))
_, err := pickCurrentLeaderClient(ctx, t, namespace, leaseName, clients).
Kubernetes.CoordinationV1().Leases(namespace.Name).Update(ctx, startLease, metav1.UpdateOptions{})
@@ -246,8 +291,6 @@ func forceTransition(ctx context.Context, t *testing.T, namespace *corev1.Namesp
require.Greater(t, finalTransitions, startTransitions)
require.Greater(t, finalTime.UnixNano(), startTime.UnixNano())
time.Sleep(2 * time.Minute) // need to give clients time to notice this change because leader election is polling based
return finalLease
}
@@ -264,8 +307,6 @@ func forceRestart(ctx context.Context, t *testing.T, namespace *corev1.Namespace
require.Zero(t, *newLease.Spec.LeaseTransitions)
require.Greater(t, newLease.Spec.AcquireTime.UnixNano(), startLease.Spec.AcquireTime.UnixNano())
time.Sleep(2 * time.Minute) // need to give clients time to notice this change because leader election is polling based
return newLease
}
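
The change above pairs every leader election client with its own context.CancelFunc so the "stop current leader" subtest can cancel exactly one client's controllers and emulate that process dying. Stripped of the Pinniped-specific types, the pattern looks roughly like this; the names and the worker body are illustrative and not taken from the codebase.

package main

import (
	"context"
	"fmt"
)

// startWorker launches a goroutine bound to its own child context and returns
// the cancel func that emulates killing that worker's process.
func startWorker(parent context.Context, identity string) context.CancelFunc {
	ctx, cancel := context.WithCancel(parent)
	go func() {
		<-ctx.Done()
		fmt.Printf("worker %s stopped\n", identity)
	}()
	return cancel
}

func main() {
	cancels := map[string]context.CancelFunc{}
	for _, id := range []string{"client-a", "client-b", "client-c"} {
		cancels[id] = startWorker(context.Background(), id)
	}

	// Emulate stopping one "leader" process and forget about it, mirroring how
	// the test cancels the current leader and deletes it from its maps.
	cancels["client-b"]()
	delete(cancels, "client-b")
}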

View File

@@ -0,0 +1,118 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package integration
import (
"os"
"reflect"
"strings"
"testing"
"unsafe"
"go.pinniped.dev/test/testlib"
)
func TestMain(m *testing.M) {
splitIntegrationTestsIntoBuckets(m)
os.Exit(m.Run())
}
func splitIntegrationTestsIntoBuckets(m *testing.M) {
// this is some dark magic to set a private field
testsField := reflect.ValueOf(m).Elem().FieldByName("tests")
testsPointer := (*[]testing.InternalTest)(unsafe.Pointer(testsField.UnsafeAddr()))
tests := *testsPointer
if len(tests) == 0 {
return
}
var serialTests, parallelTests, disruptiveTests, finalTests []testing.InternalTest
for _, test := range tests {
test := test
// top level integration tests that end with the string _Parallel
// are indicating that they are safe to run in parallel with
// other serial tests (which Go does not let you easily express).
// top level tests that want the standard Go behavior of only running
// parallel tests with other parallel tests should use the regular
// t.Parallel() approach. this has no effect on any subtest.
switch {
case strings.HasSuffix(test.Name, "_Parallel"):
parallelTests = append(parallelTests, test)
// top level integration tests that end with the string _Disruptive
// are indicating that they are never safe to run with any other
// test because they break the underlying cluster in some way.
case strings.HasSuffix(test.Name, "_Disruptive"):
disruptiveTests = append(disruptiveTests, test)
default:
serialTests = append(serialTests, test)
}
}
serialTest := testing.InternalTest{
Name: "TestIntegrationSerial",
F: func(t *testing.T) {
_ = testlib.IntegrationEnv(t) // make sure these tests do not run during unit tests
t.Parallel() // outer test always runs in parallel for this bucket
for _, test := range serialTests {
test := test
t.Run(test.Name, func(t *testing.T) {
test.F(t) // inner serial tests do not run in parallel
})
}
},
}
parallelTest := testing.InternalTest{
Name: "TestIntegrationParallel",
F: func(t *testing.T) {
_ = testlib.IntegrationEnv(t) // make sure these tests do not run during unit tests
t.Parallel() // outer test always runs in parallel for this bucket
for _, test := range parallelTests {
test := test
t.Run(test.Name, func(t *testing.T) {
t.Parallel() // inner parallel tests do run in parallel
test.F(t)
})
}
},
}
disruptiveTest := testing.InternalTest{
Name: "TestIntegrationDisruptive",
F: func(t *testing.T) {
_ = testlib.IntegrationEnv(t) // make sure these tests do not run during unit tests
// outer test never runs in parallel for this bucket
for _, test := range disruptiveTests {
test := test
t.Run(test.Name, func(t *testing.T) {
test.F(t) // inner disruptive tests do not run in parallel
})
}
},
}
if len(parallelTests) > 0 {
finalTests = append(finalTests, parallelTest)
}
if len(serialTests) > 0 {
finalTests = append(finalTests, serialTest)
}
if len(disruptiveTests) > 0 {
finalTests = append(finalTests, disruptiveTest)
}
*testsPointer = finalTests
}
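
To illustrate the naming convention that this bucketing relies on, a test author would opt into a bucket purely through the test function's name suffix, along the lines of the sketch below. These example test names and bodies are hypothetical and are not part of this commit.

package integration

import "testing"

func TestExampleReadOnly_Parallel(t *testing.T) {
	// Placed in the TestIntegrationParallel bucket because of the _Parallel suffix;
	// runs concurrently with other _Parallel tests and with the serial bucket.
}

func TestExampleBreaksTheCluster_Disruptive(t *testing.T) {
	// Placed in the TestIntegrationDisruptive bucket because of the _Disruptive suffix;
	// never runs concurrently with any other test.
}

func TestExampleDefault(t *testing.T) {
	// No suffix: placed in the TestIntegrationSerial bucket and run one at a time,
	// though that bucket as a whole runs in parallel with the _Parallel bucket.
}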

View File

@@ -39,7 +39,8 @@ import (
//
// Testing talking to the supervisor's port 8443 where the supervisor is terminating TLS itself is
// handled by the other tests in this file.
func TestSupervisorOIDCDiscovery(t *testing.T) {
// Never run this test in parallel since deleting all federation domains is disruptive, see main_test.go.
func TestSupervisorOIDCDiscovery_Disruptive(t *testing.T) {
env := testlib.IntegrationEnv(t)
client := testlib.NewSupervisorClientset(t)
@@ -143,7 +144,8 @@ func TestSupervisorOIDCDiscovery(t *testing.T) {
}
}
func TestSupervisorTLSTerminationWithSNI(t *testing.T) {
// Never run this test in parallel since deleting all federation domains is disruptive, see main_test.go.
func TestSupervisorTLSTerminationWithSNI_Disruptive(t *testing.T) {
env := testlib.IntegrationEnv(t)
pinnipedClient := testlib.NewSupervisorClientset(t)
kubeClient := testlib.NewKubernetesClientset(t)
@@ -214,7 +216,8 @@ func TestSupervisorTLSTerminationWithSNI(t *testing.T) {
})
}
func TestSupervisorTLSTerminationWithDefaultCerts(t *testing.T) {
// Never run this test in parallel since deleting all federation domains is disruptive, see main_test.go.
func TestSupervisorTLSTerminationWithDefaultCerts_Disruptive(t *testing.T) {
env := testlib.IntegrationEnv(t)
pinnipedClient := testlib.NewSupervisorClientset(t)
kubeClient := testlib.NewKubernetesClientset(t)

View File

@@ -18,7 +18,8 @@ import (
"go.pinniped.dev/test/testlib"
)
func TestSupervisorSecrets(t *testing.T) {
// safe to run in parallel with serial tests since it only interacts with a test local federation domain, see main_test.go.
func TestSupervisorSecrets_Parallel(t *testing.T) {
env := testlib.IntegrationEnv(t)
kubeClient := testlib.NewKubernetesClientset(t)
supervisorClient := testlib.NewSupervisorClientset(t)

View File

@@ -19,11 +19,8 @@ import (
"go.pinniped.dev/test/testlib"
)
func TestStorageGarbageCollection(t *testing.T) {
// Run this test in parallel with the other integration tests because it does a lot of waiting
// and will not impact other tests, or be impacted by other tests, when run in parallel.
t.Parallel()
// safe to run in parallel with serial tests since it only interacts with test local secrets, see main_test.go.
func TestStorageGarbageCollection_Parallel(t *testing.T) {
env := testlib.IntegrationEnv(t)
client := testlib.NewKubernetesClientset(t)
secrets := client.CoreV1().Secrets(env.SupervisorNamespace)

View File

@@ -29,7 +29,8 @@ import (
"go.pinniped.dev/test/testlib"
)
func TestWhoAmI_Kubeadm(t *testing.T) {
// whoami requests are non-mutating and safe to run in parallel with serial tests, see main_test.go.
func TestWhoAmI_Kubeadm_Parallel(t *testing.T) {
// use the cluster signing key being available as a proxy for this being a kubeadm cluster
// we should add more robust logic around skipping clusters based on vendor
_ = testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
@@ -60,7 +61,8 @@ func TestWhoAmI_Kubeadm(t *testing.T) {
)
}
func TestWhoAmI_ServiceAccount_Legacy(t *testing.T) {
// whoami requests are non-mutating and safe to run in parallel with serial tests, see main_test.go.
func TestWhoAmI_ServiceAccount_Legacy_Parallel(t *testing.T) {
_ = testlib.IntegrationEnv(t)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
@@ -133,8 +135,9 @@ func TestWhoAmI_ServiceAccount_Legacy(t *testing.T) {
)
}
func TestWhoAmI_ServiceAccount_TokenRequest(t *testing.T) {
_ = testlib.IntegrationEnv(t)
// whoami requests are non-mutating and safe to run in parallel with serial tests, see main_test.go.
func TestWhoAmI_ServiceAccount_TokenRequest_Parallel(t *testing.T) {
env := testlib.IntegrationEnv(t)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
@@ -168,9 +171,10 @@ func TestWhoAmI_ServiceAccount_TokenRequest(t *testing.T) {
corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "ignored-but-required",
Image: "busybox",
Command: []string{"sh", "-c", "sleep 3600"},
Name: "sleeper",
Image: env.ShellContainerImage,
ImagePullPolicy: corev1.PullIfNotPresent,
Command: []string{"sh", "-c", "sleep 3600"},
},
},
ServiceAccountName: sa.Name,
@@ -241,7 +245,8 @@ func TestWhoAmI_ServiceAccount_TokenRequest(t *testing.T) {
)
}
func TestWhoAmI_CSR(t *testing.T) {
// whoami requests are non-mutating and safe to run in parallel with serial tests, see main_test.go.
func TestWhoAmI_CSR_Parallel(t *testing.T) {
// use the cluster signing key being available as a proxy for this not being an EKS cluster
// we should add more robust logic around skipping clusters based on vendor
_ = testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
@@ -329,7 +334,8 @@ func TestWhoAmI_CSR(t *testing.T) {
)
}
func TestWhoAmI_Anonymous(t *testing.T) {
// whoami requests are non-mutating and safe to run in parallel with serial tests, see main_test.go.
func TestWhoAmI_Anonymous_Parallel(t *testing.T) {
_ = testlib.IntegrationEnv(t).WithCapability(testlib.AnonymousAuthenticationSupported)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
@@ -359,7 +365,8 @@ func TestWhoAmI_Anonymous(t *testing.T) {
)
}
func TestWhoAmI_ImpersonateDirectly(t *testing.T) {
// whoami requests are non-mutating and safe to run in parallel with serial tests, see main_test.go.
func TestWhoAmI_ImpersonateDirectly_Parallel(t *testing.T) {
_ = testlib.IntegrationEnv(t)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)

View File

@@ -54,6 +54,7 @@ type TestEnv struct {
SupervisorHTTPSIngressCABundle string `json:"supervisorHttpsIngressCABundle"`
Proxy string `json:"proxy"`
APIGroupSuffix string `json:"apiGroupSuffix"`
ShellContainerImage string `json:"shellContainer"`
TestUser struct {
Token string `json:"token"`
@@ -127,7 +128,7 @@ func IntegrationEnv(t *testing.T) *TestEnv {
}
t.Helper()
SkipUnlessIntegration(t)
skipUnlessIntegration(t)
capabilitiesDescriptionYAML := os.Getenv("PINNIPED_TEST_CLUSTER_CAPABILITY_YAML")
capabilitiesDescriptionFile := os.Getenv("PINNIPED_TEST_CLUSTER_CAPABILITY_FILE")
@@ -232,6 +233,7 @@ func loadEnvVars(t *testing.T, result *TestEnv) {
result.Proxy = os.Getenv("PINNIPED_TEST_PROXY")
result.APIGroupSuffix = wantEnv("PINNIPED_TEST_API_GROUP_SUFFIX", "pinniped.dev")
result.ShellContainerImage = needEnv(t, "PINNIPED_TEST_SHELL_CONTAINER_IMAGE")
result.CLIUpstreamOIDC = TestOIDCUpstream{
Issuer: needEnv(t, "PINNIPED_TEST_CLI_OIDC_ISSUER"),
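
The new ShellContainerImage field is populated from the PINNIPED_TEST_SHELL_CONTAINER_IMAGE environment variable via the needEnv helper, whose implementation is not part of this diff. A minimal sketch of what such a helper typically does, assuming it simply fails the test when the required variable is unset, might look like this; the real Pinniped helper may differ.

package testlib

import (
	"os"
	"testing"
)

// needEnv returns the value of a required environment variable, failing the
// test if it is not set.
func needEnv(t *testing.T, key string) string {
	t.Helper()
	value := os.Getenv(key)
	if value == "" {
		t.Fatalf("must set %s for integration tests", key)
	}
	return value
}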

View File

@@ -5,8 +5,8 @@ package testlib
import "testing"
// SkipUnlessIntegration skips the current test if `-short` has been passed to `go test`.
func SkipUnlessIntegration(t *testing.T) {
// skipUnlessIntegration skips the current test if `-short` has been passed to `go test`.
func skipUnlessIntegration(t *testing.T) {
t.Helper()
if testing.Short() {
t.Skip("skipping integration test because of '-short' flag")