Add basic integration test cases

This commit is contained in:
Yang Le
2020-05-14 19:23:39 +08:00
parent b10a920e52
commit 90cf73cd3f
333 changed files with 201227 additions and 1605 deletions

32
test/integration-test.mk Normal file
View File

@@ -0,0 +1,32 @@
# Integration test support: downloads the kubebuilder-tools archive (which
# bundles kube-apiserver/etcd binaries) so the tests can run against a real
# API server via controller-runtime envtest.

# Scratch directory for downloaded tooling.
TEST_TMP :=/tmp
# Where envtest looks for the kube-apiserver binaries (exported for `go test`).
export KUBEBUILDER_ASSETS ?=$(TEST_TMP)/kubebuilder/bin
# Kubernetes version of the kubebuilder-tools archive to download.
K8S_VERSION ?=1.16.4
# NOTE(review): GOHOSTOS/GOHOSTARCH are not defined in this fragment — confirm
# the including makefile sets them before this file is evaluated.
KB_TOOLS_ARCHIVE_NAME :=kubebuilder-tools-$(K8S_VERSION)-$(GOHOSTOS)-$(GOHOSTARCH).tar.gz
KB_TOOLS_ARCHIVE_PATH := $(TEST_TMP)/$(KB_TOOLS_ARCHIVE_NAME)

# download the kubebuilder-tools to get kube-apiserver binaries from it
# (skipped when $(KUBEBUILDER_ASSETS) already exists on disk)
ensure-kubebuilder-tools:
ifeq "" "$(wildcard $(KUBEBUILDER_ASSETS))"
	$(info Downloading kube-apiserver into '$(KUBEBUILDER_ASSETS)')
	mkdir -p '$(KUBEBUILDER_ASSETS)'
	curl -s -f -L https://storage.googleapis.com/kubebuilder-tools/$(KB_TOOLS_ARCHIVE_NAME) -o '$(KB_TOOLS_ARCHIVE_PATH)'
	tar -C '$(KUBEBUILDER_ASSETS)' --strip-components=2 -zvxf '$(KB_TOOLS_ARCHIVE_PATH)'
else
	$(info Using existing kube-apiserver from "$(KUBEBUILDER_ASSETS)")
endif
.PHONY: ensure-kubebuilder-tools

# Remove the downloaded archive, the extracted binaries and the compiled
# integration test binary.
clean-integration-test:
	$(RM) '$(KB_TOOLS_ARCHIVE_PATH)'
	rm -rf $(TEST_TMP)/kubebuilder
	$(RM) ./integration.test
.PHONY: clean-integration-test

# Hook the integration-test cleanup into the top-level clean target.
clean: clean-integration-test

# Compile the integration test binary and run it against the envtest apiserver.
test-integration: ensure-kubebuilder-tools
	go test -c ./test/integration
	./integration.test -ginkgo.slowSpecThreshold=15 -ginkgo.v -ginkgo.failFast
.PHONY: test-integration

5
test/integration/doc.go Normal file
View File

@@ -0,0 +1,5 @@
// Package integration provides integration tests for open-cluster-management work.
// The test cases include:
//   - create work
//   - update work
//   - delete work
package integration

View File

@@ -0,0 +1,88 @@
package integration
import (
"io/ioutil"
"os"
"path"
"path/filepath"
"testing"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/kubectl/pkg/scheme"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
workclientset "github.com/open-cluster-management/api/client/work/clientset/versioned"
workapiv1 "github.com/open-cluster-management/api/work/v1"
"github.com/open-cluster-management/work/test/integration/util"
)
// Default polling parameters for gomega.Eventually assertions.
const (
	eventuallyTimeout  = 30 // seconds
	eventuallyInterval = 1  // seconds
)

// Shared suite state, initialized in BeforeSuite and read by the specs.
var tempDir string                        // temp dir holding the generated hub kubeconfig
var hubKubeconfigFileName string          // path to the kubeconfig pointing at the envtest apiserver
var spokeRestConfig *rest.Config          // rest config for the spoke (same envtest cluster)
var testEnv *envtest.Environment          // envtest-managed kube-apiserver
var spokeKubeClient kubernetes.Interface  // typed client for core resources on the spoke
var hubWorkClient workclientset.Interface // client for ManifestWork resources on the hub
// TestIntegration is the single go-test entry point; it hands control to
// Ginkgo, which runs every spec registered in this package.
func TestIntegration(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Integration Suite")
}
// Suite setup: boot an envtest kube-apiserver, write a kubeconfig for it
// (used as the "hub" config by the work agent), and build the shared clients.
// The trailing 60 is the Ginkgo timeout (seconds) for this setup function.
var _ = ginkgo.BeforeSuite(func(done ginkgo.Done) {
	logf.SetLogger(zap.LoggerTo(ginkgo.GinkgoWriter, true))
	ginkgo.By("bootstrapping test environment")
	// start a kube-apiserver
	testEnv = &envtest.Environment{
		ErrorIfCRDPathMissing: true,
		CRDDirectoryPaths: []string{
			// ManifestWork CRD shipped in the vendored open-cluster-management API
			filepath.Join(".", "vendor", "github.com", "open-cluster-management", "api", "work", "v1"),
		},
	}
	cfg, err := testEnv.Start()
	gomega.Expect(err).ToNot(gomega.HaveOccurred())
	gomega.Expect(cfg).ToNot(gomega.BeNil())
	// create kubeconfig file for hub in a tmp dir
	tempDir, err = ioutil.TempDir("", "test")
	gomega.Expect(err).ToNot(gomega.HaveOccurred())
	gomega.Expect(tempDir).ToNot(gomega.BeEmpty())
	hubKubeconfigFileName = path.Join(tempDir, "kubeconfig")
	err = util.CreateKubeconfigFile(cfg, hubKubeconfigFileName)
	gomega.Expect(err).ToNot(gomega.HaveOccurred())
	// register ManifestWork types so clients can encode/decode them
	err = workapiv1.AddToScheme(scheme.Scheme)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	// hub and spoke share the same envtest cluster in these tests
	spokeRestConfig = cfg
	spokeKubeClient, err = kubernetes.NewForConfig(cfg)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	hubWorkClient, err = workclientset.NewForConfig(cfg)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	close(done)
}, 60)
// Suite teardown: remove the temp kubeconfig dir and stop the envtest
// kube-apiserver.
var _ = ginkgo.AfterSuite(func() {
	ginkgo.By("tearing down the test environment")
	// Remove the temp dir BEFORE asserting on Stop: gomega.Expect aborts the
	// function on failure, so cleaning up afterwards would leak the dir
	// whenever stopping the test environment fails.
	if tempDir != "" {
		os.RemoveAll(tempDir)
	}
	err := testEnv.Stop()
	gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

View File

@@ -0,0 +1,90 @@
package util
import (
"context"
"reflect"
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
workclientset "github.com/open-cluster-management/api/client/work/clientset/versioned"
workapiv1 "github.com/open-cluster-management/api/work/v1"
)
// AssertWorkCondition waits (up to eventuallyTimeout, polling every
// eventuallyInterval seconds) until the ManifestWork namespace/name reports
// the expected condition both per manifest and on the work itself.
//   - expectedType: the condition type to look for
//   - expectedWorkStatus: expected status of that condition on the work
//   - expectedManifestStatuses: expected per-manifest statuses, in order
func AssertWorkCondition(namespace, name string, workClient workclientset.Interface, expectedType string, expectedWorkStatus metav1.ConditionStatus,
	expectedManifestStatuses []metav1.ConditionStatus, eventuallyTimeout, eventuallyInterval int) {
	gomega.Eventually(func() bool {
		work, err := workClient.WorkV1().ManifestWorks(namespace).Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			return false
		}
		// check manifest status conditions
		if ok := HaveManifestCondition(work.Status.ResourceStatus.Manifests, expectedType, expectedManifestStatuses); !ok {
			return false
		}
		// check work status condition
		// BUGFIX: previously this always checked WorkApplied/ConditionTrue,
		// silently ignoring the expectedType/expectedWorkStatus arguments.
		return HaveCondition(work.Status.Conditions, expectedType, expectedWorkStatus)
	}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
}
// check if work is deleted
// AssertWorkDeleted waits until both the ManifestWork namespace/name and every
// ConfigMap wrapped in manifests have been removed from the cluster.
func AssertWorkDeleted(namespace, name string, manifests []workapiv1.Manifest, workClient workclientset.Interface, kubeClient kubernetes.Interface, eventuallyTimeout, eventuallyInterval int) {
	gomega.Eventually(func() bool {
		// the work itself must be gone
		_, err := workClient.WorkV1().ManifestWorks(namespace).Get(context.Background(), name, metav1.GetOptions{})
		if !apierrors.IsNotFound(err) {
			return false
		}
		// ...and so must every ConfigMap it applied
		for i := range manifests {
			cm := manifests[i].Object.(*corev1.ConfigMap)
			_, getErr := kubeClient.CoreV1().ConfigMaps(cm.Namespace).Get(context.Background(), cm.Name, metav1.GetOptions{})
			if !apierrors.IsNotFound(getErr) {
				return false
			}
		}
		return true
	}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
}
// check if finalizer is added
// AssertFinalizerAdded waits until the ManifestWork namespace/name carries the
// manifest-work-cleanup finalizer set by the work agent.
func AssertFinalizerAdded(namespace, name string, workClient workclientset.Interface, eventuallyTimeout, eventuallyInterval int) {
	const cleanupFinalizer = "cluster.open-cluster-management.io/manifest-work-cleanup"
	gomega.Eventually(func() bool {
		work, err := workClient.WorkV1().ManifestWorks(namespace).Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			return false
		}
		for _, f := range work.Finalizers {
			if f == cleanupFinalizer {
				return true
			}
		}
		return false
	}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
}
// check if all manifests are applied
func AssertManifestsApplied(manifests []workapiv1.Manifest, kubeClient kubernetes.Interface, eventuallyTimeout, eventuallyInterval int) {
gomega.Eventually(func() bool {
for _, manifest := range manifests {
expected := manifest.Object.(*corev1.ConfigMap)
actual, err := kubeClient.CoreV1().ConfigMaps(expected.Namespace).Get(context.Background(), expected.Name, metav1.GetOptions{})
if err != nil {
return false
}
if !reflect.DeepEqual(actual.Data, expected.Data) {
return false
}
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
}

View File

@@ -0,0 +1,157 @@
package util
import (
"fmt"
"github.com/onsi/ginkgo"
"github.com/openshift/library-go/pkg/operator/events"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
workapiv1 "github.com/open-cluster-management/api/work/v1"
)
// HaveCondition reports whether conditions contains a condition of
// expectedType whose status equals expectedStatus. It returns false when no
// condition of that type exists, or when the first matching condition carries
// a different status.
func HaveCondition(conditions []workapiv1.StatusCondition, expectedType string, expectedStatus metav1.ConditionStatus) bool {
	for _, cond := range conditions {
		if cond.Type == expectedType {
			// decide on the first condition of the requested type
			return cond.Status == expectedStatus
		}
	}
	return false
}
// HaveManifestCondition reports whether each manifest condition carries the
// expected status for expectedType. The two slices must have equal length;
// an empty string in expectedStatuses skips the manifest at that index.
func HaveManifestCondition(conditions []workapiv1.ManifestCondition, expectedType string, expectedStatuses []metav1.ConditionStatus) bool {
	if len(conditions) != len(expectedStatuses) {
		return false
	}
	for i, want := range expectedStatuses {
		if want == "" {
			// status at this index is intentionally unchecked
			continue
		}
		if !HaveCondition(conditions[i].Conditions, expectedType, want) {
			return false
		}
	}
	return true
}
// NewManifestWork builds a ManifestWork in namespace wrapping manifests.
// An empty name requests a server-generated name with the "work-" prefix.
func NewManifestWork(namespace, name string, manifests []workapiv1.Manifest) *workapiv1.ManifestWork {
	meta := metav1.ObjectMeta{Namespace: namespace}
	if name == "" {
		meta.GenerateName = "work-"
	} else {
		meta.Name = name
	}
	return &workapiv1.ManifestWork{
		ObjectMeta: meta,
		Spec: workapiv1.ManifestWorkSpec{
			Workload: workapiv1.ManifestsTemplate{
				Manifests: manifests,
			},
		},
	}
}
// NewConfigmap returns a v1 ConfigMap with the given namespace, name and data.
// TypeMeta is populated explicitly so the object can be embedded in a
// ManifestWork manifest as-is.
func NewConfigmap(namespace, name string, data map[string]string) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ConfigMap",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
		},
		Data: data,
	}
}
// ToManifest wraps a runtime object into a ManifestWork manifest entry.
func ToManifest(object runtime.Object) workapiv1.Manifest {
	var m workapiv1.Manifest
	m.Object = object
	return m
}
// CreateKubeconfigFile writes a kubeconfig at filename describing the cluster
// and credentials in clientConfig (typically the envtest rest.Config), so
// components that only accept a kubeconfig path can connect to it.
func CreateKubeconfigFile(clientConfig *rest.Config, filename string) error {
	// Build kubeconfig.
	kubeconfig := clientcmdapi.Config{
		// single cluster entry copied from the rest.Config
		Clusters: map[string]*clientcmdapi.Cluster{"default-cluster": {
			Server:                   clientConfig.Host,
			InsecureSkipTLSVerify:    clientConfig.Insecure,
			CertificateAuthorityData: clientConfig.CAData,
		}},
		// client-certificate credentials copied from the rest.Config
		AuthInfos: map[string]*clientcmdapi.AuthInfo{"default-auth": {
			ClientCertificate:     clientConfig.CertFile,
			ClientCertificateData: clientConfig.CertData,
			ClientKey:             clientConfig.KeyFile,
			ClientKeyData:         clientConfig.KeyData,
		}},
		Contexts: map[string]*clientcmdapi.Context{"default-context": {
			Cluster:  "default-cluster",
			AuthInfo: "default-auth",
			// NOTE(review): default namespace "configuration" looks copied
			// from an example — confirm no consumer relies on it.
			Namespace: "configuration",
		}},
		CurrentContext: "default-context",
	}
	return clientcmd.WriteToFile(kubeconfig, filename)
}
// NewIntegrationTestEventRecorder returns an events.Recorder that writes
// events to the Ginkgo writer instead of recording them in the cluster.
// (Fixes the misspelled parameter name "componet"; Go call sites are
// positional, so the rename is fully backward-compatible.)
func NewIntegrationTestEventRecorder(component string) events.Recorder {
	return &IntegrationTestEventRecorder{component: component}
}
// IntegrationTestEventRecorder is an events.Recorder implementation for tests
// that writes events and warnings to ginkgo.GinkgoWriter.
type IntegrationTestEventRecorder struct {
	component string // component name prefixed to every emitted line
}
// ComponentName returns the component this recorder reports events for.
func (r *IntegrationTestEventRecorder) ComponentName() string {
	return r.component
}
// ForComponent returns a new recorder that reports events for component c.
func (r *IntegrationTestEventRecorder) ForComponent(c string) events.Recorder {
	return &IntegrationTestEventRecorder{component: c}
}
// WithComponentSuffix returns a recorder whose component name is the current
// name extended with "-" plus suffix.
func (r *IntegrationTestEventRecorder) WithComponentSuffix(suffix string) events.Recorder {
	name := fmt.Sprintf("%s-%s", r.ComponentName(), suffix)
	return r.ForComponent(name)
}
// Event writes a normal event line to the Ginkgo writer.
func (r *IntegrationTestEventRecorder) Event(reason, message string) {
	fmt.Fprintf(ginkgo.GinkgoWriter, "Event: [%s] %v: %v \n", r.component, reason, message)
}
// Eventf formats messageFmt with args and emits it as a normal event.
func (r *IntegrationTestEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
	msg := fmt.Sprintf(messageFmt, args...)
	r.Event(reason, msg)
}
// Warning writes a warning event line to the Ginkgo writer.
func (r *IntegrationTestEventRecorder) Warning(reason, message string) {
	fmt.Fprintf(ginkgo.GinkgoWriter, "Warning: [%s] %v: %v \n", r.component, reason, message)
}
// Warningf formats messageFmt with args and emits it as a warning event.
func (r *IntegrationTestEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
	msg := fmt.Sprintf(messageFmt, args...)
	r.Warning(reason, msg)
}

View File

@@ -0,0 +1,147 @@
package integration
import (
"context"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"github.com/openshift/library-go/pkg/controller/controllercmd"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilrand "k8s.io/apimachinery/pkg/util/rand"
workapiv1 "github.com/open-cluster-management/api/work/v1"
"github.com/open-cluster-management/work/pkg/spoke"
"github.com/open-cluster-management/work/test/integration/util"
)
// startWorkAgent runs the spoke work agent until ctx is cancelled, wired to
// the suite's spoke rest config and a Ginkgo-backed event recorder.
// Intended to be launched in its own goroutine.
func startWorkAgent(ctx context.Context, o *spoke.WorkloadAgentOptions) {
	controllerCtx := &controllercmd.ControllerContext{
		KubeConfig:    spokeRestConfig,
		EventRecorder: util.NewIntegrationTestEventRecorder("integration"),
	}
	gomega.Expect(o.RunWorkloadAgent(ctx, controllerCtx)).NotTo(gomega.HaveOccurred())
}
// ManifestWork specs: each spec starts a fresh work agent against a randomly
// named namespace, creates a ManifestWork on the hub, and asserts that the
// agent applies (or deletes) the wrapped ConfigMaps on the spoke.
var _ = ginkgo.Describe("ManifestWork", func() {
	// shared across the setup/teardown closures and the specs below
	var o *spoke.WorkloadAgentOptions
	var cancel context.CancelFunc
	var work *workapiv1.ManifestWork
	var manifests []workapiv1.Manifest
	var err error
	// create an isolated namespace and start a work agent bound to it
	ginkgo.BeforeEach(func() {
		o = spoke.NewWorkloadAgentOptions()
		o.HubKubeconfigFile = hubKubeconfigFileName
		// random cluster name keeps specs isolated from each other
		o.SpokeClusterName = utilrand.String(5)
		ns := &corev1.Namespace{}
		ns.Name = o.SpokeClusterName
		_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		var ctx context.Context
		ctx, cancel = context.WithCancel(context.Background())
		go startWorkAgent(ctx, o)
	})
	// JustBeforeEach runs after each Context's BeforeEach has populated
	// `manifests`, so the created work picks up the per-context manifest set
	ginkgo.JustBeforeEach(func() {
		work = util.NewManifestWork(o.SpokeClusterName, "", manifests)
		work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
	})
	// stop the agent and delete the per-spec namespace
	ginkgo.AfterEach(func() {
		if cancel != nil {
			cancel()
		}
		err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.SpokeClusterName, metav1.DeleteOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
	})
	ginkgo.Context("With a single manifest", func() {
		ginkgo.BeforeEach(func() {
			manifests = []workapiv1.Manifest{
				util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"})),
			}
		})
		ginkgo.It("should create work and then apply it successfully", func() {
			util.AssertManifestsApplied(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
			/* comment this block until PR work/8 (https://github.com/open-cluster-management/work/pull/8) is merged
			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
				[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
			*/
		})
		ginkgo.It("should update work and then apply it successfully", func() {
			// swap the workload for a different ConfigMap and update the work
			newManifests := []workapiv1.Manifest{
				util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"x": "y"})),
			}
			work.Spec.Workload.Manifests = newManifests
			work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
			util.AssertManifestsApplied(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
			// TODO: check if resources created by old manifests are deleted
		})
		ginkgo.It("should delete work successfully", func() {
			// wait for the cleanup finalizer before deleting, so the agent
			// gets a chance to garbage-collect the applied resources
			util.AssertFinalizerAdded(work.Namespace, work.Name, hubWorkClient, eventuallyTimeout, eventuallyInterval)
			err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
			util.AssertWorkDeleted(work.Namespace, work.Name, manifests, hubWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
		})
	})
	ginkgo.Context("With multiple manifests", func() {
		ginkgo.BeforeEach(func() {
			// first manifest targets a namespace that does not exist, so only
			// the last two are expected to be applied
			manifests = []workapiv1.Manifest{
				util.ToManifest(util.NewConfigmap("non-existent-namespace", "cm1", map[string]string{"a": "b"})),
				util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"})),
				util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm3", map[string]string{"e": "f"})),
			}
		})
		ginkgo.It("should create work and then apply it successfully", func() {
			// only the manifests in the real namespace can be applied
			util.AssertManifestsApplied(manifests[1:], spokeKubeClient, eventuallyTimeout, eventuallyInterval)
			/* comment this block until PR work/8 (https://github.com/open-cluster-management/work/pull/8) is merged
			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
				[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
			*/
		})
		ginkgo.It("should update work and then apply it successfully", func() {
			// move cm1 into the real namespace and change cm2's data
			newManifests := []workapiv1.Manifest{
				util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"})),
				util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"x": "y"})),
				util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm3", map[string]string{"e": "f"})),
			}
			work.Spec.Workload.Manifests = newManifests
			work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
			util.AssertManifestsApplied(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
			// TODO: check if resources created by old manifests are deleted
		})
		ginkgo.It("should delete work successfully", func() {
			util.AssertFinalizerAdded(work.Namespace, work.Name, hubWorkClient, eventuallyTimeout, eventuallyInterval)
			err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
			util.AssertWorkDeleted(work.Namespace, work.Name, manifests, hubWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
		})
	})
})