From c993830f25b9a30c66c223b10870f13eeb014d7e Mon Sep 17 00:00:00 2001 From: Jian Qiu Date: Fri, 30 Jul 2021 17:45:35 +0800 Subject: [PATCH] Support delete option Signed-off-by: Jian Qiu ---
 Makefile | 2 +- deploy/spoke/clusterrole.yaml | 2 +- deploy/webhook/manifestworks.crd.yaml | 48 +++ go.mod | 2 + go.sum | 19 +- pkg/helper/helper_test.go | 59 ++- pkg/helper/helpers.go | 50 ++- .../appliedmanifestwork_controller.go | 7 +- .../appliedmanifestwork_controller_test.go | 38 +- ...appliedmanifestwork_finalize_controller.go | 6 +- ...edmanifestwork_finalize_controller_test.go | 14 +- .../manifestwork_controller.go | 205 ++++++++--- .../manifestwork_controller_test.go | 90 ++++- .../spoketesting/manifestwork_helpers.go | 15 +- test/e2e/bindata/bindata.go | 2 +- test/integration/util/assertion.go | 10 +- test/integration/work_test.go | 344 ++++++++++++++---- ...000_10_config-operator_01_ingress.crd.yaml | 13 +- .../openshift/api/config/v1/types.go | 10 +- .../v1/zz_generated.swagger_doc_generated.go | 6 +- vendor/github.com/openshift/api/go.mod | 7 +- vendor/github.com/openshift/api/go.sum | 12 +- vendor/modules.txt | 5 +- 23 files changed, 767 insertions(+), 199 deletions(-)
 diff --git a/Makefile b/Makefile index 9efd8e741..4ee649753 100644 --- a/Makefile +++ b/Makefile @@ -61,7 +61,7 @@ endif e2e-hub-kubeconfig-secret: cluster-ip cp $(HUB_KUBECONFIG) e2e-hub-kubeconfig $(KUBECTL) apply -f deploy/spoke/component_namespace.yaml --kubeconfig $(SPOKE_KUBECONFIG) - $(KUBECTL) config set clusters.kind-kind.server https://$(CLUSTER_IP) --kubeconfig e2e-hub-kubeconfig + $(KUBECTL) config set clusters.$(HUB_KUBECONFIG_CONTEXT).server https://$(CLUSTER_IP) --kubeconfig e2e-hub-kubeconfig $(KUBECTL) delete secret e2e-hub-kubeconfig-secret -n open-cluster-management-agent --ignore-not-found --kubeconfig $(SPOKE_KUBECONFIG) $(KUBECTL) create secret generic e2e-hub-kubeconfig-secret --from-file=kubeconfig=e2e-hub-kubeconfig -n open-cluster-management-agent --kubeconfig $(SPOKE_KUBECONFIG) $(RM) ./e2e-hub-kubeconfig
 diff --git a/deploy/spoke/clusterrole.yaml b/deploy/spoke/clusterrole.yaml index b0804c840..3fe6bd17b 100644 --- a/deploy/spoke/clusterrole.yaml +++ b/deploy/spoke/clusterrole.yaml @@ -11,7 +11,7 @@ rules: # Allow agent to create/delete namespaces, get/list are contained in admin role already - apiGroups: [""] resources: ["namespaces"] - verbs: ["create", "delete"] + verbs: ["create", "delete", "update"] # Allow agent to manage role/rolebinding/clusterrole/clusterrolebinding - apiGroups: ["rbac.authorization.k8s.io"] resources: ["clusterrolebindings", "rolebindings"]
 diff --git a/deploy/webhook/manifestworks.crd.yaml b/deploy/webhook/manifestworks.crd.yaml index 665c87c5c..6ee4d706e 100644 --- a/deploy/webhook/manifestworks.crd.yaml +++ b/deploy/webhook/manifestworks.crd.yaml @@ -39,6 +39,54 @@ spec: on the managed cluster. type: object properties: + deleteOption: + description: DeleteOption represents the deletion strategy when the manifestwork + is deleted. The Foreground deletion strategy is applied to all the resources + in this manifestwork if it is not set. + type: object + properties: + propagationPolicy: + description: propagationPolicy can be Foreground, Orphan or SelectivelyOrphan. + SelectivelyOrphan should be rarely used. It is provided for + cases where a particular resource is transferring ownership from + one ManifestWork to another or another management unit. Setting + this value will allow a flow like 1. create manifestwork/2 to + manage foo 2.
update manifestwork/1 to selectively orphan foo + 3. remove foo from manifestwork/1 without impacting continuity + because manifestwork/2 adopts it. + type: string + default: Foreground + selectivelyOrphans: + description: selectivelyOrphan represents a list of resources + following the orphan deletion strategy + type: object + properties: + orphaningRules: + description: orphaningRules defines a slice of orphaningrule. + Each orphaningrule identifies a single resource included + in this manifestwork + type: array + items: + description: OrphaningRule identifies a single resource + included in this manifestwork + type: object + properties: + name: + description: Name is the name of the resource in the + workload to which the strategy is applied + type: string + namespace: + description: Namespace is the namespace of the resource + in the workload to which the strategy is applied + type: string + group: + description: Group is the API group of the resource + in the workload to which the strategy is applied + type: string + resource: + description: Resource is the resource type in the workload + to which the strategy is applied + type: string workload: description: Workload represents the manifest workload to be deployed on a managed cluster.
 diff --git a/go.mod b/go.mod index a52efa60f..06ac75aff 100644 --- a/go.mod +++ b/go.mod @@ -24,3 +24,5 @@ require ( open-cluster-management.io/api v0.0.0-20210823013037-9667ae902e4b sigs.k8s.io/controller-runtime v0.9.6 ) + +replace github.com/openshift/library-go => github.com/qiujian16/library-go v0.0.0-20210830020149-91704dd0762a
 diff --git a/go.sum b/go.sum index 1d7fb2883..3e3e504b4 100644 --- a/go.sum +++ b/go.sum @@ -479,17 +479,15 @@ github.com/onsi/gomega v1.14.0 h1:ep6kpPVwmr/nTbklSx2nrLNSIO62DoYAhnPNIMhK8gI=
 github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 -github.com/openshift/api v0.0.0-20210831091943-07e756545ac1 h1:BleifEWC+NP/YhYHyQlGrDflXZPxawwOzyLUI+nr4jw=
 -github.com/openshift/api v0.0.0-20210831091943-07e756545ac1/go.mod h1:RsQCVJu4qhUawxxDP7pGlwU3IA4F01wYm3qKEu29Su8=
 +github.com/openshift/api v0.0.0-20210730095913-85e1d547cdee h1:6c2jWQObJGpqW+3UynpLUUTWv1D7VoMoTUM/kT55glU=
 +github.com/openshift/api v0.0.0-20210730095913-85e1d547cdee/go.mod h1:ntkQrC1Z6AxxkhDlVpDVjkD+pzdwVUalWyfH40rSyyM=
 github.com/openshift/build-machinery-go v0.0.0-20210115170933-e575b44a7a94/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
 github.com/openshift/build-machinery-go v0.0.0-20210712174854-1bb7fd1518d3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
 github.com/openshift/build-machinery-go v0.0.0-20210806203541-4ea9b6da3a37 h1:40Nw4fwP1tXx0g1UgIGoLA9eoSdLm7jBUXFH5uVYjBA=
 github.com/openshift/build-machinery-go v0.0.0-20210806203541-4ea9b6da3a37/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
 -github.com/openshift/client-go v0.0.0-20210831095141-e19a065e79f7/go.mod h1:D6P8RkJzwdkBExQdYUnkWcePMLBiTeCCr8eQIQ7y8Dk=
 +github.com/openshift/client-go v0.0.0-20210730113412-1811c1b3fc0e/go.mod h1:P1pjphFOgm/nYjmtouHGaSLGtdP25dQICJnYtcYhfEs=
 github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480 h1:y47BAJFepK8Xls1c+quIOyc46OXiT9LRiqGVjIaMlSA=
 github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480/go.mod h1:OAHL5WnZphlhVEf5fTdeGLvNwMu1B2zCWpmxJpCA35o=
-github.com/openshift/library-go v0.0.0-20210902020120-5ddb355aea6f h1:dT4WEKxaVsQRlig8F4PXWf/EUCDQh3tBt9ZO7OpO5b4= -github.com/openshift/library-go v0.0.0-20210902020120-5ddb355aea6f/go.mod h1:ymWf1TnfDo0LgjihlqHzYoy81pTY5wBL+bl3XdHNEYI= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -534,6 +532,8 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/qiujian16/library-go v0.0.0-20210830020149-91704dd0762a h1:PgTnMP7QACLm7iYakAjb4tVrzvdvhsB0njpCMwCcwy8= +github.com/qiujian16/library-go v0.0.0-20210830020149-91704dd0762a/go.mod h1:3GagmGg6gikg+hAqma7E7axBzs2pjx4+GrAbdl4OYdY= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= @@ -1047,11 +1047,13 @@ k8s.io/api v0.18.0-beta.2/go.mod h1:2oeNnWEqcSmaM/ibSh3t7xcIqbkGXhzZdn4ezV9T4m0= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= +k8s.io/api v0.22.0-rc.0/go.mod h1:EUcKB6RvpW74HMRUSSNwpUzrIHBdGT1FeAvOV+txic0= k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY= k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8= k8s.io/apiextensions-apiserver v0.18.0-beta.2/go.mod h1:Hnrg5jx8/PbxRbUoqDGxtQkULjwx8FDW4WYJaKNK+fk= k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE= +k8s.io/apiextensions-apiserver v0.22.0-rc.0/go.mod h1:KSr+2VJ6ye8Fy50q7xHZ/Tw8vrRII82KIKbz9eUFmeo= k8s.io/apiextensions-apiserver v0.22.1 h1:YSJYzlFNFSfUle+yeEXX0lSQyLEoxoPJySRupepb0gE= k8s.io/apiextensions-apiserver v0.22.1/go.mod h1:HeGmorjtRmRLE+Q8dJu6AYRoZccvCMsghwS8XTUYb2c= k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= @@ -1059,12 +1061,14 @@ k8s.io/apimachinery v0.18.0-beta.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZF k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= +k8s.io/apimachinery v0.22.0-rc.0/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg= k8s.io/apiserver v0.18.0-beta.2/go.mod h1:bnblMkMoCFnIfVnVftd0SXJPzyvrk3RtaqSbblphF/A= k8s.io/apiserver v0.19.0/go.mod h1:XvzqavYj73931x7FLtyagh8WibHpePJ1QwWrSJs2CLk= k8s.io/apiserver v0.21.3/go.mod 
h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= +k8s.io/apiserver v0.22.0-rc.0/go.mod h1:1AfFSkRbaPVFzfSIWd0m/onp49mmAOqXR9qrLJFixlw= k8s.io/apiserver v0.22.1 h1:Ul9Iv8OMB2s45h2tl5XWPpAZo1VPIJ/6N+MESeed7L8= k8s.io/apiserver v0.22.1/go.mod h1:2mcM6dzSt+XndzVQJX21Gx0/Klo7Aen7i0Ai6tIa400= k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k= @@ -1072,17 +1076,20 @@ k8s.io/client-go v0.18.0-beta.2/go.mod h1:UvuVxHjKWIcgy0iMvF+bwNDW7l0mskTNOaOW1Q k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU= +k8s.io/client-go v0.22.0-rc.0/go.mod h1:BZGppBKJh4UtgDZcIIh6vHJsJ1iZiXS7EwKZYWhyklo= k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw= k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk= k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/code-generator v0.18.0-beta.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= +k8s.io/code-generator v0.22.0-rc.0/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc= k8s.io/component-base v0.18.0-beta.2/go.mod h1:HVk5FpRnyzQ/MjBr9//e/yEBjTVa2qjGXCTuUzcD7ks= k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= +k8s.io/component-base v0.22.0-rc.0/go.mod h1:DKSub/kewg24bK+3ZJ/csu86fSBYpGdYk837eCTvEKg= k8s.io/component-base v0.22.1 h1:SFqIXsEN3v3Kkr1bS6rstrs1wd45StJqbtgbQ4nRQdo= k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -1103,6 +1110,7 @@ k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-aggregator v0.18.0-beta.2/go.mod h1:O3Td9mheraINbLHH4pzoFP2gRzG0Wk1COqzdSL4rBPk= +k8s.io/kube-aggregator v0.22.0-rc.0/go.mod h1:g0xtiBSsbMKvewN7xR/Icib4TrHxtvrJcHtYvFsgw7k= k8s.io/kube-aggregator v0.22.1 h1:hsntyWsnkLiL4ccmoKfqiUVyxnlnqtqPRMuq/mT2wGQ= k8s.io/kube-aggregator v0.22.1/go.mod h1:VbmI+8fUeCPkzSvarWTrlIGEgUGEGI/66SFajDQ0Pdc= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= @@ -1132,6 +1140,7 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.21/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 
h1:fmRfl9WJ4ApJn7LxNuED4m0t18qivVQOxP6aAYG9J6c= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-runtime v0.9.6 h1:EevVMlgUj4fC1NVM4+DB3iPkWkmGRNarA66neqv9Qew= diff --git a/pkg/helper/helper_test.go b/pkg/helper/helper_test.go index 580450abe..8b4adffb3 100644 --- a/pkg/helper/helper_test.go +++ b/pkg/helper/helper_test.go @@ -45,15 +45,16 @@ func newManifestCondition(ordinal int32, resource string, conds ...metav1.Condit } } -func newSecret(namespace, name string, terminated bool, uid string) *corev1.Secret { +func newSecret(namespace, name string, terminated bool, uid string, owner ...metav1.OwnerReference) *corev1.Secret { secret := &corev1.Secret{ TypeMeta: metav1.TypeMeta{ Kind: "Secret", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, + Name: name, + Namespace: namespace, + OwnerReferences: owner, }, } @@ -333,29 +334,47 @@ func TestDeleteAppliedResourcess(t *testing.T) { existingResources []runtime.Object resourcesToRemove []workapiv1.AppliedManifestResourceMeta expectedResourcesPendingFinalization []workapiv1.AppliedManifestResourceMeta + owner metav1.OwnerReference }{ { name: "skip if resource does not exist", resourcesToRemove: []workapiv1.AppliedManifestResourceMeta{ {Version: "v1", Resource: "secrets", Namespace: "ns1", Name: "n1"}, }, + owner: metav1.OwnerReference{Name: "n1", UID: "a"}, }, { name: "skip if resource have different uid", existingResources: []runtime.Object{ - newSecret("ns1", "n1", false, "ns1-n1-xxx"), - newSecret("ns2", "n2", true, "ns2-n2-xxx"), + newSecret("ns1", "n1", false, "ns1-n1-xxx", metav1.OwnerReference{Name: "n1", UID: "a"}), + newSecret("ns2", "n2", true, "ns2-n2-xxx", metav1.OwnerReference{Name: "n1", UID: "a"}), }, resourcesToRemove: []workapiv1.AppliedManifestResourceMeta{ {Version: "v1", Resource: "secrets", Namespace: "ns1", Name: "n1", UID: "ns1-n1"}, {Version: "v1", Resource: "secrets", Namespace: "ns2", Name: "n2", UID: "ns2-n2"}, }, + owner: metav1.OwnerReference{Name: "n1", UID: "a"}, }, { name: "delete resources", existingResources: []runtime.Object{ - newSecret("ns1", "n1", false, "ns1-n1"), - newSecret("ns2", "n2", true, "ns2-n2"), + newSecret("ns1", "n1", false, "ns1-n1", metav1.OwnerReference{Name: "n1", UID: "a"}), + newSecret("ns2", "n2", false, "ns2-n2", metav1.OwnerReference{Name: "n2", UID: "b"}), + }, + resourcesToRemove: []workapiv1.AppliedManifestResourceMeta{ + {Version: "v1", Resource: "secrets", Namespace: "ns1", Name: "n1", UID: "ns1-n1"}, + {Version: "v1", Resource: "secrets", Namespace: "ns2", Name: "n2", UID: "ns2-n2"}, + }, + expectedResourcesPendingFinalization: []workapiv1.AppliedManifestResourceMeta{ + {Version: "v1", Resource: "secrets", Namespace: "ns1", Name: "n1", UID: "ns1-n1"}, + }, + owner: metav1.OwnerReference{Name: "n1", UID: "a"}, + }, + { + name: "skip without uid", + existingResources: []runtime.Object{ + newSecret("ns1", "n1", false, "ns1-n1", metav1.OwnerReference{Name: "n1", UID: "a"}), + newSecret("ns2", "n2", true, "ns2-n2", metav1.OwnerReference{Name: "n1", UID: "a"}), }, resourcesToRemove: []workapiv1.AppliedManifestResourceMeta{ {Version: "v1", Resource: "secrets", Namespace: "ns1", Name: "n1"}, @@ -364,6 +383,30 @@ func TestDeleteAppliedResourcess(t *testing.T) { expectedResourcesPendingFinalization: []workapiv1.AppliedManifestResourceMeta{ {Version: "v1", Resource: "secrets", Namespace: "ns2", Name: "n2", UID: "ns2-n2"}, }, + owner: 
metav1.OwnerReference{Name: "n1", UID: "a"}, + }, + { + name: "skip if it is now owned", + existingResources: []runtime.Object{ + newSecret("ns1", "n1", false, "ns1-n1", metav1.OwnerReference{Name: "n2", UID: "b"}), + }, + resourcesToRemove: []workapiv1.AppliedManifestResourceMeta{ + {Version: "v1", Resource: "secrets", Namespace: "ns1", Name: "n1", UID: "ns1-n1"}, + }, + expectedResourcesPendingFinalization: []workapiv1.AppliedManifestResourceMeta{}, + owner: metav1.OwnerReference{Name: "n1", UID: "a"}, + }, + { + name: "skip with multiple owners", + existingResources: []runtime.Object{ + newSecret("ns1", "n1", false, "ns1-n1", metav1.OwnerReference{Name: "n1", UID: "a"}, metav1.OwnerReference{Name: "n2", UID: "b"}), + }, + resourcesToRemove: []workapiv1.AppliedManifestResourceMeta{ + {Version: "v1", Resource: "secrets", Namespace: "ns1", Name: "n1", UID: "ns1-n1"}, + {Version: "v1", Resource: "secrets", Namespace: "ns2", Name: "n2", UID: "ns2-n2"}, + }, + expectedResourcesPendingFinalization: []workapiv1.AppliedManifestResourceMeta{}, + owner: metav1.OwnerReference{Name: "n1", UID: "a"}, }, } @@ -373,7 +416,7 @@ func TestDeleteAppliedResourcess(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { fakeDynamicClient := fakedynamic.NewSimpleDynamicClient(scheme, c.existingResources...) - actual, err := DeleteAppliedResources(c.resourcesToRemove, "testing", fakeDynamicClient, eventstesting.NewTestingEventRecorder(t)) + actual, err := DeleteAppliedResources(c.resourcesToRemove, "testing", fakeDynamicClient, eventstesting.NewTestingEventRecorder(t), c.owner) if err != nil { t.Errorf("unexpected err: %v", err) } diff --git a/pkg/helper/helpers.go b/pkg/helper/helpers.go index a5aa9f866..4b10f43b6 100644 --- a/pkg/helper/helpers.go +++ b/pkg/helper/helpers.go @@ -10,6 +10,7 @@ import ( "github.com/openshift/library-go/pkg/controller/factory" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/resource/resourcehelper" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apimachinery/pkg/api/equality" @@ -176,10 +177,19 @@ func UpdateManifestWorkStatus( // DeleteAppliedResources deletes all given applied resources and returns those pending for finalization // If the uid recorded in resources is different from what we get by client, ignore the deletion. -func DeleteAppliedResources(resources []workapiv1.AppliedManifestResourceMeta, reason string, dynamicClient dynamic.Interface, recorder events.Recorder) ([]workapiv1.AppliedManifestResourceMeta, []error) { +func DeleteAppliedResources( + resources []workapiv1.AppliedManifestResourceMeta, + reason string, + dynamicClient dynamic.Interface, + recorder events.Recorder, + owner metav1.OwnerReference) ([]workapiv1.AppliedManifestResourceMeta, []error) { var resourcesPendingFinalization []workapiv1.AppliedManifestResourceMeta var errs []error + // set owner to be removed + ownerCopy := owner.DeepCopy() + ownerCopy.UID = types.UID(fmt.Sprintf("%s-", owner.UID)) + for _, resource := range resources { gvr := schema.GroupVersionResource{Group: resource.Group, Version: resource.Version, Resource: resource.Resource} u, err := dynamicClient. 
@@ -198,6 +208,34 @@ func DeleteAppliedResources(resources []workapiv1.AppliedManifestResourceMeta, r continue } + existingOwner := u.GetOwnerReferences() + + // If it is not owned by us, skip + if !IsOwnedBy(owner, existingOwner) { + continue + } + + // Merge with the existing owners to move the owner. + modified := resourcemerge.BoolPtr(false) + resourcemerge.MergeOwnerRefs(modified, &existingOwner, []metav1.OwnerReference{*ownerCopy}) + + // If there are still any other existing owners (not only ManifestWorks), update ownerrefs only. + if len(existingOwner) > 0 { + if !*modified { + continue + } + + u.SetOwnerReferences(existingOwner) + _, err = dynamicClient.Resource(gvr).Namespace(resource.Namespace).Update(context.TODO(), u, metav1.UpdateOptions{}) + if err != nil { + errs = append(errs, fmt.Errorf( + "Failed to remove owner from resource %v with key %s/%s: %w", + gvr, resource.Namespace, resource.Name, err)) + } + + continue + } + if resource.UID != string(u.GetUID()) { // the traced instance has been deleted, and forget this item. continue @@ -286,6 +324,16 @@ func HubHash(hubServer string) string { return fmt.Sprintf("%x", sha256.Sum256([]byte(hubServer))) } +// IsOwnedBy check if owner exists in the ownerrefs. +func IsOwnedBy(myOwner metav1.OwnerReference, existingOwners []metav1.OwnerReference) bool { + for _, owner := range existingOwners { + if myOwner.UID == owner.UID { + return true + } + } + return false +} + func NewAppliedManifestWorkOwner(appliedWork *workapiv1.AppliedManifestWork) *metav1.OwnerReference { return &metav1.OwnerReference{ APIVersion: workapiv1.GroupVersion.WithKind("AppliedManifestWork").GroupVersion().String(), diff --git a/pkg/spoke/controllers/appliedmanifestcontroller/appliedmanifestwork_controller.go b/pkg/spoke/controllers/appliedmanifestcontroller/appliedmanifestwork_controller.go index f8a86466b..a0e743b3d 100644 --- a/pkg/spoke/controllers/appliedmanifestcontroller/appliedmanifestwork_controller.go +++ b/pkg/spoke/controllers/appliedmanifestcontroller/appliedmanifestwork_controller.go @@ -153,10 +153,15 @@ func (m *AppliedManifestWorkController) syncManifestWork( return utilerrors.NewAggregate(errs) } + owner := helper.NewAppliedManifestWorkOwner(appliedManifestWork) + // delete applied resources which are no longer maintained by manifest work noLongerMaintainedResources := findUntrackedResources(appliedManifestWork.Status.AppliedResources, appliedResources) + reason := fmt.Sprintf("it is no longer maintained by manifestwork %s", manifestWork.Name) - resourcesPendingFinalization, errs := helper.DeleteAppliedResources(noLongerMaintainedResources, reason, m.spokeDynamicClient, controllerContext.Recorder()) + + resourcesPendingFinalization, errs := helper.DeleteAppliedResources( + noLongerMaintainedResources, reason, m.spokeDynamicClient, controllerContext.Recorder(), *owner) if len(errs) != 0 { return utilerrors.NewAggregate(errs) } diff --git a/pkg/spoke/controllers/appliedmanifestcontroller/appliedmanifestwork_controller_test.go b/pkg/spoke/controllers/appliedmanifestcontroller/appliedmanifestwork_controller_test.go index 2f905e435..d9c3e465f 100644 --- a/pkg/spoke/controllers/appliedmanifestcontroller/appliedmanifestwork_controller_test.go +++ b/pkg/spoke/controllers/appliedmanifestcontroller/appliedmanifestwork_controller_test.go @@ -9,6 +9,7 @@ import ( "github.com/davecgh/go-spew/spew" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" fakedynamic "k8s.io/client-go/dynamic/fake" 
clienttesting "k8s.io/client-go/testing" "k8s.io/client-go/util/workqueue" @@ -16,6 +17,7 @@ import ( fakeworkclient "open-cluster-management.io/api/client/work/clientset/versioned/fake" workinformers "open-cluster-management.io/api/client/work/informers/externalversions" workapiv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/work/pkg/helper" "open-cluster-management.io/work/pkg/spoke/spoketesting" ) @@ -32,6 +34,10 @@ func newManifest(group, version, resource, namespace, name string) workapiv1.Man } func TestSyncManifestWork(t *testing.T) { + uid := types.UID("test") + appliedWork := spoketesting.NewAppliedManifestWork("test", 0, uid) + owner := helper.NewAppliedManifestWorkOwner(appliedWork) + cases := []struct { name string existingResources []runtime.Object @@ -44,7 +50,7 @@ func TestSyncManifestWork(t *testing.T) { { name: "skip when no applied resource changed", existingResources: []runtime.Object{ - spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1"), + spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1", *owner), }, appliedResources: []workapiv1.AppliedManifestResourceMeta{ {Version: "v1", Resource: "secrets", Namespace: "ns1", Name: "n1", UID: "ns1-n1"}, @@ -60,12 +66,12 @@ func TestSyncManifestWork(t *testing.T) { { name: "delete untracked resources", existingResources: []runtime.Object{ - spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1"), - spoketesting.NewUnstructuredSecret("ns2", "n2", false, "ns2-n2"), - spoketesting.NewUnstructuredSecret("ns3", "n3", false, "ns3-n3"), - spoketesting.NewUnstructuredSecret("ns4", "n4", false, "ns4-n4"), - spoketesting.NewUnstructuredSecret("ns5", "n5", false, "ns5-n5"), - spoketesting.NewUnstructuredSecret("ns6", "n6", false, "ns6-n6"), + spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1", *owner), + spoketesting.NewUnstructuredSecret("ns2", "n2", false, "ns2-n2", *owner), + spoketesting.NewUnstructuredSecret("ns3", "n3", false, "ns3-n3", *owner), + spoketesting.NewUnstructuredSecret("ns4", "n4", false, "ns4-n4", *owner), + spoketesting.NewUnstructuredSecret("ns5", "n5", false, "ns5-n5", *owner), + spoketesting.NewUnstructuredSecret("ns6", "n6", false, "ns6-n6", *owner), }, appliedResources: []workapiv1.AppliedManifestResourceMeta{ {Group: "", Version: "v1", Resource: "secrets", Namespace: "ns1", Name: "n1", UID: "ns1-n1"}, @@ -103,9 +109,9 @@ func TestSyncManifestWork(t *testing.T) { { name: "requeue work when applied resource for stale manifest is deleting", existingResources: []runtime.Object{ - spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1"), - spoketesting.NewUnstructuredSecret("ns2", "n2", false, "ns2-n2"), - spoketesting.NewUnstructuredSecret("ns3", "n3", true, "ns3-n3"), + spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1", *owner), + spoketesting.NewUnstructuredSecret("ns2", "n2", false, "ns2-n2", *owner), + spoketesting.NewUnstructuredSecret("ns3", "n3", true, "ns3-n3", *owner), }, appliedResources: []workapiv1.AppliedManifestResourceMeta{ {Version: "v1", Resource: "secrets", Namespace: "ns1", Name: "n1", UID: "ns1-n1"}, @@ -127,9 +133,9 @@ func TestSyncManifestWork(t *testing.T) { { name: "ignore re-created resource", existingResources: []runtime.Object{ - spoketesting.NewUnstructuredSecret("ns3", "n3", false, "ns3-n3-recreated"), - spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1"), - spoketesting.NewUnstructuredSecret("ns5", "n5", false, "ns5-n5"), + spoketesting.NewUnstructuredSecret("ns3", "n3", false, 
"ns3-n3-recreated", *owner), + spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1", *owner), + spoketesting.NewUnstructuredSecret("ns5", "n5", false, "ns5-n5", *owner), }, appliedResources: []workapiv1.AppliedManifestResourceMeta{ {Version: "v1", Resource: "secrets", Namespace: "ns3", Name: "n3", UID: "ns3-n3"}, @@ -156,8 +162,8 @@ func TestSyncManifestWork(t *testing.T) { { name: "update resource uid", existingResources: []runtime.Object{ - spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1"), - spoketesting.NewUnstructuredSecret("ns2", "n2", false, "ns2-n2-updated"), + spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1", *owner), + spoketesting.NewUnstructuredSecret("ns2", "n2", false, "ns2-n2-updated", *owner), }, appliedResources: []workapiv1.AppliedManifestResourceMeta{ {Version: "v1", Resource: "secrets", Namespace: "ns1", Name: "n1", UID: "ns1-n1"}, @@ -186,7 +192,7 @@ func TestSyncManifestWork(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { testingWork, _ := spoketesting.NewManifestWork(0) - testingAppliedWork := spoketesting.NewAppliedManifestWork("test", 0) + testingAppliedWork := appliedWork.DeepCopy() testingAppliedWork.Status.AppliedResources = c.appliedResources testingWork.Status.ResourceStatus.Manifests = c.manifests diff --git a/pkg/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller.go b/pkg/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller.go index fd921a5f8..bd29e9068 100644 --- a/pkg/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller.go +++ b/pkg/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller.go @@ -93,11 +93,15 @@ func (m *AppliedManifestWorkFinalizeController) syncAppliedManifestWork(ctx cont var err error + owner := helper.NewAppliedManifestWorkOwner(appliedManifestWork) + // Work is deleting, we remove its related resources on spoke cluster // We still need to run delete for every resource even with ownerref on it, since ownerref does not handle cluster // scoped resource correctly. 
reason := fmt.Sprintf("manifestwork %s is terminating", appliedManifestWork.Spec.ManifestWorkName) - resourcesPendingFinalization, errs := helper.DeleteAppliedResources(appliedManifestWork.Status.AppliedResources, reason, m.spokeDynamicClient, controllerContext.Recorder()) + resourcesPendingFinalization, errs := helper.DeleteAppliedResources( + appliedManifestWork.Status.AppliedResources, reason, m.spokeDynamicClient, controllerContext.Recorder(), *owner) + updatedAppliedManifestWork := false if len(appliedManifestWork.Status.AppliedResources) != len(resourcesPendingFinalization) { // update the status of the manifest work accordingly diff --git a/pkg/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller_test.go b/pkg/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller_test.go index 9990a3077..eac010052 100644 --- a/pkg/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller_test.go +++ b/pkg/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller_test.go @@ -10,16 +10,22 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" fakedynamic "k8s.io/client-go/dynamic/fake" clienttesting "k8s.io/client-go/testing" "k8s.io/client-go/util/workqueue" fakeworkclient "open-cluster-management.io/api/client/work/clientset/versioned/fake" workapiv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/work/pkg/helper" "open-cluster-management.io/work/pkg/spoke/controllers" "open-cluster-management.io/work/pkg/spoke/spoketesting" ) func TestFinalize(t *testing.T) { + uid := types.UID("test") + appliedWork := spoketesting.NewAppliedManifestWork("test", 0, uid) + owner := helper.NewAppliedManifestWorkOwner(appliedWork) + cases := []struct { name string existingFinalizers []string @@ -99,8 +105,8 @@ func TestFinalize(t *testing.T) { terminated: true, existingFinalizers: []string{controllers.AppliedManifestWorkFinalizer}, existingResources: []runtime.Object{ - spoketesting.NewUnstructuredSecret("ns1", "n1", true, "ns1-n1"), - spoketesting.NewUnstructuredSecret("ns2", "n2", true, "ns2-n2"), + spoketesting.NewUnstructuredSecret("ns1", "n1", true, "ns1-n1", *owner), + spoketesting.NewUnstructuredSecret("ns2", "n2", true, "ns2-n2", *owner), }, resourcesToRemove: []workapiv1.AppliedManifestResourceMeta{ {Version: "v1", Resource: "secrets", Namespace: "ns1", Name: "n1", UID: "ns1-n1"}, @@ -130,7 +136,7 @@ func TestFinalize(t *testing.T) { terminated: true, existingFinalizers: []string{controllers.AppliedManifestWorkFinalizer}, existingResources: []runtime.Object{ - spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1"), + spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1", *owner), }, resourcesToRemove: []workapiv1.AppliedManifestResourceMeta{ {Version: "v1", Resource: "secrets", Namespace: "ns1", Name: "n1", UID: "n1"}, @@ -173,7 +179,7 @@ func TestFinalize(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - testingWork := spoketesting.NewAppliedManifestWork("test", 0) + testingWork := appliedWork.DeepCopy() testingWork.Finalizers = c.existingFinalizers if c.terminated { now := metav1.Now() diff --git a/pkg/spoke/controllers/manifestcontroller/manifestwork_controller.go b/pkg/spoke/controllers/manifestcontroller/manifestwork_controller.go index 60c933693..24f29259f 100644 --- a/pkg/spoke/controllers/manifestcontroller/manifestwork_controller.go 
+++ b/pkg/spoke/controllers/manifestcontroller/manifestwork_controller.go @@ -15,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -24,6 +25,7 @@ import ( "github.com/openshift/library-go/pkg/controller/factory" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" workinformer "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" worklister "open-cluster-management.io/api/client/work/listers/work/v1" @@ -49,6 +51,12 @@ type ManifestWorkController struct { restMapper meta.RESTMapper } +type applyResult struct { + resourceapply.ApplyResult + + resourceMeta workapiv1.ManifestResourceMeta +} + // NewManifestWorkController returns a ManifestWorkController func NewManifestWorkController( ctx context.Context, @@ -147,9 +155,10 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac errs := []error{} // Apply resources on spoke cluster. - resourceResults := make([]resourceapply.ApplyResult, len(manifestWork.Spec.Workload.Manifests)) + resourceResults := make([]applyResult, len(manifestWork.Spec.Workload.Manifests)) retry.RetryOnConflict(retry.DefaultBackoff, func() error { - resourceResults = m.applyManifests(ctx, manifestWork.Spec.Workload.Manifests, controllerContext.Recorder(), *owner, resourceResults) + resourceResults = m.applyManifests( + ctx, manifestWork.Spec.Workload.Manifests, manifestWork.Spec.DeleteOption, controllerContext.Recorder(), *owner, resourceResults) for _, result := range resourceResults { if errors.IsConflict(result.Error) { @@ -161,18 +170,13 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac }) newManifestConditions := []workapiv1.ManifestCondition{} - for index, result := range resourceResults { + for _, result := range resourceResults { if result.Error != nil { errs = append(errs, result.Error) } - resourceMeta, err := buildManifestResourceMeta(index, result.Result, manifestWork.Spec.Workload.Manifests[index], m.restMapper) - if err != nil { - errs = append(errs, err) - } - manifestCondition := workapiv1.ManifestCondition{ - ResourceMeta: resourceMeta, + ResourceMeta: result.resourceMeta, Conditions: []metav1.Condition{}, } @@ -196,15 +200,21 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac } func (m *ManifestWorkController) applyManifests( - ctx context.Context, manifests []workapiv1.Manifest, recorder events.Recorder, owner metav1.OwnerReference, existingResults []resourceapply.ApplyResult) []resourceapply.ApplyResult { + ctx context.Context, + manifests []workapiv1.Manifest, + deleteOption *workapiv1.DeleteOption, + recorder events.Recorder, + owner metav1.OwnerReference, + existingResults []applyResult) []applyResult { + for index, manifest := range manifests { switch { case existingResults[index].Result == nil: // Apply if there is not result. 
- existingResults[index] = m.applyOneManifest(ctx, manifest, recorder, owner) + existingResults[index] = m.applyOneManifest(ctx, index, manifest, deleteOption, recorder, owner) case errors.IsConflict(existingResults[index].Error): // Apply if there is a resource confilct error. - existingResults[index] = m.applyOneManifest(ctx, manifest, recorder, owner) + existingResults[index] = m.applyOneManifest(ctx, index, manifest, deleteOption, recorder, owner) } } @@ -212,82 +222,173 @@ func (m *ManifestWorkController) applyManifests( } func (m *ManifestWorkController) applyOneManifest( - ctx context.Context, manifest workapiv1.Manifest, recorder events.Recorder, owner metav1.OwnerReference) resourceapply.ApplyResult { + ctx context.Context, + index int, + manifest workapiv1.Manifest, + deleteOption *workapiv1.DeleteOption, + recorder events.Recorder, + owner metav1.OwnerReference) applyResult { + clientHolder := resourceapply.NewClientHolder(). WithAPIExtensionsClient(m.spokeAPIExtensionClient). WithKubernetes(m.spokeKubeclient). WithDynamicClient(m.spokeDynamicClient) + result := applyResult{} + + resMeta, gvr, err := buildManifestResourceMeta(index, manifest, m.restMapper) + result.resourceMeta = resMeta + if err != nil { + result.Error = err + return result + } + + owner = manageOwnerRef(gvr, resMeta.Namespace, resMeta.Name, deleteOption, owner) + results := resourceapply.ApplyDirectly(ctx, clientHolder, recorder, func(name string) ([]byte, error) { unstructuredObj := &unstructured.Unstructured{} err := unstructuredObj.UnmarshalJSON(manifest.Raw) if err != nil { return nil, err } + unstructuredObj.SetOwnerReferences([]metav1.OwnerReference{owner}) return unstructuredObj.MarshalJSON() }, "manifest") + result.Result = results[0].Result + result.Changed = results[0].Changed + result.Error = results[0].Error + // Try apply with dynamic client if the manifest cannot be decoded by scheme or typed client is not found // TODO we should check the certain error. 
// Use dynamic client when scheme cannot decode manifest or typed client cannot handle the object - result := results[0] - if isDecodeError(result.Error) || isUnhandledError(result.Error) || isUnsupportedError(result.Error) { - result.Result, result.Changed, result.Error = m.applyUnstructrued(ctx, manifest.Raw, owner, recorder) + result.Result, result.Changed, result.Error = m.applyUnstructured(ctx, manifest.Raw, owner, deleteOption, gvr, recorder) } return result } -func (m *ManifestWorkController) decodeUnstructured(data []byte) (schema.GroupVersionResource, *unstructured.Unstructured, error) { +func (m *ManifestWorkController) decodeUnstructured(data []byte) (*unstructured.Unstructured, error) { unstructuredObj := &unstructured.Unstructured{} err := unstructuredObj.UnmarshalJSON(data) if err != nil { - return schema.GroupVersionResource{}, nil, fmt.Errorf("Failed to decode object: %w", err) - } - mapping, err := m.restMapper.RESTMapping(unstructuredObj.GroupVersionKind().GroupKind(), unstructuredObj.GroupVersionKind().Version) - if err != nil { - return schema.GroupVersionResource{}, nil, fmt.Errorf("Failed to find gvr from restmapping: %w", err) + return nil, fmt.Errorf("Failed to decode object: %w", err) } - return mapping.Resource, unstructuredObj, nil + if err != nil { + return nil, fmt.Errorf("Failed to find gvr from restmapping: %w", err) + } + + return unstructuredObj, nil } -func (m *ManifestWorkController) applyUnstructrued(ctx context.Context, data []byte, owner metav1.OwnerReference, recorder events.Recorder) (*unstructured.Unstructured, bool, error) { - gvr, required, err := m.decodeUnstructured(data) +func (m *ManifestWorkController) applyUnstructured( + ctx context.Context, + data []byte, + owner metav1.OwnerReference, + deleteOption *workapiv1.DeleteOption, + gvr schema.GroupVersionResource, + recorder events.Recorder) (*unstructured.Unstructured, bool, error) { + + required, err := m.decodeUnstructured(data) if err != nil { return nil, false, err } - required.SetOwnerReferences([]metav1.OwnerReference{owner}) existing, err := m.spokeDynamicClient. Resource(gvr). Namespace(required.GetNamespace()). Get(ctx, required.GetName(), metav1.GetOptions{}) - if errors.IsNotFound(err) { + + switch { + case errors.IsNotFound(err): actual, err := m.spokeDynamicClient.Resource(gvr).Namespace(required.GetNamespace()).Create( - ctx, required, metav1.CreateOptions{}) + ctx, resourcemerge.WithCleanLabelsAndAnnotations(required).(*unstructured.Unstructured), metav1.CreateOptions{}) recorder.Eventf(fmt.Sprintf( "%s Created", required.GetKind()), "Created %s/%s because it was missing", required.GetNamespace(), required.GetName()) return actual, true, err - } - if err != nil { + case err != nil: return nil, false, err } + // Merge OwnerRefs. + required.SetOwnerReferences([]metav1.OwnerReference{owner}) + existingOwners := existing.GetOwnerReferences() + modified := resourcemerge.BoolPtr(false) + + resourcemerge.MergeOwnerRefs(modified, &existingOwners, required.GetOwnerReferences()) + + if *modified { + required.SetOwnerReferences(existingOwners) + } + // Compare and update the unstrcuctured. 
if isSameUnstructured(required, existing) { return existing, false, nil } required.SetResourceVersion(existing.GetResourceVersion()) actual, err := m.spokeDynamicClient.Resource(gvr).Namespace(required.GetNamespace()).Update( - context.TODO(), required, metav1.UpdateOptions{}) + ctx, required, metav1.UpdateOptions{}) recorder.Eventf(fmt.Sprintf( "%s Updated", required.GetKind()), "Updated %s/%s", required.GetNamespace(), required.GetName()) return actual, true, err }
 +// manageOwnerRef returns an ownerRef based on the resource and the deleteOption, indicating whether the ownerRef +// should be removed or added. If the resource is orphaned, the owner's UID is updated for removal. +func manageOwnerRef( + gvr schema.GroupVersionResource, + namespace, name string, + deleteOption *workapiv1.DeleteOption, + myOwner metav1.OwnerReference) metav1.OwnerReference { + + // By default, it is foreground deletion. + if deleteOption == nil { + return myOwner + } + + removalKey := fmt.Sprintf("%s-", myOwner.UID) + ownerCopy := myOwner.DeepCopy() + + switch deleteOption.PropagationPolicy { + case workapiv1.DeletePropagationPolicyTypeForeground: + return myOwner + case workapiv1.DeletePropagationPolicyTypeOrphan: + ownerCopy.UID = types.UID(removalKey) + return *ownerCopy + } + + // If no selectivelyOrphan is specified, none of the manifests should be orphaned + if deleteOption.SelectivelyOrphan == nil { + return myOwner + } + + for _, o := range deleteOption.SelectivelyOrphan.OrphaningRules { + if o.Group != gvr.Group { + continue + } + + if o.Resource != gvr.Resource { + continue + } + + if o.Name != name { + continue + } + + if o.Namespace != namespace { + continue + } + + ownerCopy.UID = types.UID(removalKey) + return *ownerCopy + } + + return myOwner +} + // generateUpdateStatusFunc returns a function which aggregates manifest conditions and generates work conditions. // Rules to generate work status conditions from manifest conditions // #1: Applied - work status condition (with type Applied) is applied if all manifest conditions (with type Applied) are applied @@ -365,6 +466,9 @@ func isSameUnstructured(obj1, obj2 *unstructured.Unstructured) bool { if !equality.Semantic.DeepEqual(obj1Copy.GetAnnotations(), obj2Copy.GetAnnotations()) { return false } + if !equality.Semantic.DeepEqual(obj1Copy.GetOwnerReferences(), obj2Copy.GetOwnerReferences()) { + return false + } // Compare semantically after removing metadata and status field delete(obj1Copy.Object, "metadata") @@ -393,7 +497,7 @@ func allInCondition(conditionType string, manifests []workapiv1.ManifestConditio return exists, exists } -func buildAppliedStatusCondition(result resourceapply.ApplyResult) metav1.Condition { +func buildAppliedStatusCondition(result applyResult) metav1.Condition { if result.Error != nil { return metav1.Condition{ Type: string(workapiv1.ManifestApplied), @@ -414,15 +518,13 @@ func buildAppliedStatusCondition(result resourceapply.ApplyResult) metav1.Condit // buildManifestResourceMeta returns resource meta for manifest. It tries to get the resource // meta from the result object in ApplyResult struct. If the resource meta is incompleted, fall // back to manifest template for the meta info.
-func buildManifestResourceMeta(index int, object runtime.Object, manifest workapiv1.Manifest, restMapper meta.RESTMapper) (resourceMeta workapiv1.ManifestResourceMeta, err error) { +func buildManifestResourceMeta( + index int, + manifest workapiv1.Manifest, + restMapper meta.RESTMapper) (resourceMeta workapiv1.ManifestResourceMeta, gvr schema.GroupVersionResource, err error) { errs := []error{} - resourceMeta, err = buildResourceMeta(index, object, restMapper) - if err != nil { - errs = append(errs, err) - } else if len(resourceMeta.Kind) > 0 && len(resourceMeta.Version) > 0 && len(resourceMeta.Name) > 0 { - return resourceMeta, nil - } + var object runtime.Object // try to get resource meta from manifest if the one got from apply result is incompleted switch { @@ -432,31 +534,34 @@ func buildManifestResourceMeta(index int, object runtime.Object, manifest workap unstructuredObj := &unstructured.Unstructured{} if err = unstructuredObj.UnmarshalJSON(manifest.Raw); err != nil { errs = append(errs, err) - return resourceMeta, utilerrors.NewAggregate(errs) + return resourceMeta, gvr, utilerrors.NewAggregate(errs) } object = unstructuredObj } - resourceMeta, err = buildResourceMeta(index, object, restMapper) + resourceMeta, gvr, err = buildResourceMeta(index, object, restMapper) if err == nil { - return resourceMeta, nil + return resourceMeta, gvr, nil } - return resourceMeta, utilerrors.NewAggregate(errs) + return resourceMeta, gvr, utilerrors.NewAggregate(errs) } -func buildResourceMeta(index int, object runtime.Object, restMapper meta.RESTMapper) (resourceMeta workapiv1.ManifestResourceMeta, err error) { - resourceMeta = workapiv1.ManifestResourceMeta{ +func buildResourceMeta( + index int, + object runtime.Object, + restMapper meta.RESTMapper) (workapiv1.ManifestResourceMeta, schema.GroupVersionResource, error) { + resourceMeta := workapiv1.ManifestResourceMeta{ Ordinal: int32(index), } if object == nil || reflect.ValueOf(object).IsNil() { - return resourceMeta, err + return resourceMeta, schema.GroupVersionResource{}, nil } // set gvk gvk, err := helper.GuessObjectGroupVersionKind(object) if err != nil { - return resourceMeta, err + return resourceMeta, schema.GroupVersionResource{}, err } resourceMeta.Group = gvk.Group resourceMeta.Version = gvk.Version @@ -472,13 +577,13 @@ func buildResourceMeta(index int, object runtime.Object, restMapper meta.RESTMap // set resource if restMapper == nil { - return resourceMeta, err + return resourceMeta, schema.GroupVersionResource{}, err } mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) if err != nil { - return resourceMeta, fmt.Errorf("the server doesn't have a resource type %q", gvk.Kind) + return resourceMeta, schema.GroupVersionResource{}, fmt.Errorf("the server doesn't have a resource type %q", gvk.Kind) } resourceMeta.Resource = mapping.Resource.Resource - return resourceMeta, err + return resourceMeta, mapping.Resource, err } diff --git a/pkg/spoke/controllers/manifestcontroller/manifestwork_controller_test.go b/pkg/spoke/controllers/manifestcontroller/manifestwork_controller_test.go index 6ef951afa..8f5da6155 100644 --- a/pkg/spoke/controllers/manifestcontroller/manifestwork_controller_test.go +++ b/pkg/spoke/controllers/manifestcontroller/manifestwork_controller_test.go @@ -11,6 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/diff" fakedynamic 
"k8s.io/client-go/dynamic/fake" fakekube "k8s.io/client-go/kubernetes/fake" @@ -606,7 +607,7 @@ func TestBuildResourceMeta(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - actual, err := buildResourceMeta(0, c.object, c.restMapper) + actual, _, err := buildResourceMeta(0, c.object, c.restMapper) if err != nil { t.Errorf("Should be success with no err: %v", err) } @@ -627,12 +628,6 @@ func TestBuildManifestResourceMeta(t *testing.T) { restMapper meta.RESTMapper expected workapiv1.ManifestResourceMeta }{ - { - name: "extract meta from apply result", - applyResult: spoketesting.NewSecret("test1", "ns1", "value1"), - restMapper: spoketesting.NewFakeRestMapper(), - expected: workapiv1.ManifestResourceMeta{Version: "v1", Kind: "Secret", Resource: "secrets", Namespace: "ns1", Name: "test1"}, - }, { name: "fall back to manifest", manifestObject: spoketesting.NewSecret("test2", "ns2", "value2"), @@ -647,7 +642,7 @@ func TestBuildManifestResourceMeta(t *testing.T) { if c.manifestObject != nil { manifest.Object = c.manifestObject } - actual, err := buildManifestResourceMeta(0, c.applyResult, manifest, c.restMapper) + actual, _, err := buildManifestResourceMeta(0, manifest, c.restMapper) if err != nil { t.Errorf("Should be success with no err: %v", err) } @@ -659,3 +654,82 @@ func TestBuildManifestResourceMeta(t *testing.T) { }) } } + +func TestManageOwner(t *testing.T) { + testGVR := schema.GroupVersionResource{Version: "v1", Resource: "secrets"} + + namespace := "testns" + + name := "test" + + cases := []struct { + name string + deleteOption *workapiv1.DeleteOption + owner metav1.OwnerReference + expectOwner metav1.OwnerReference + }{ + { + name: "foreground by default", + owner: metav1.OwnerReference{UID: "testowner"}, + expectOwner: metav1.OwnerReference{UID: "testowner"}, + }, + { + name: "orphan the resource", + owner: metav1.OwnerReference{UID: "testowner"}, + deleteOption: &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan}, + expectOwner: metav1.OwnerReference{UID: "testowner-"}, + }, + { + name: "add owner if no orphan rule with selectively orphan", + owner: metav1.OwnerReference{UID: "testowner"}, + deleteOption: &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan}, + expectOwner: metav1.OwnerReference{UID: "testowner"}, + }, + { + name: "orphan the resource with selectively orphan", + owner: metav1.OwnerReference{UID: "testowner"}, + deleteOption: &workapiv1.DeleteOption{ + PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan, + SelectivelyOrphan: &workapiv1.SelectivelyOrphan{ + OrphaningRules: []workapiv1.OrphaningRule{ + { + Group: "", + Resource: "secrets", + Namespace: namespace, + Name: name, + }, + }, + }, + }, + expectOwner: metav1.OwnerReference{UID: "testowner-"}, + }, + { + name: "add owner if resourcec is not matched in orphan rule with selectively orphan", + owner: metav1.OwnerReference{UID: "testowner"}, + deleteOption: &workapiv1.DeleteOption{ + PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan, + SelectivelyOrphan: &workapiv1.SelectivelyOrphan{ + OrphaningRules: []workapiv1.OrphaningRule{ + { + Group: "", + Resource: "secrets", + Namespace: "testns1", + Name: name, + }, + }, + }, + }, + expectOwner: metav1.OwnerReference{UID: "testowner"}, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + owner := manageOwnerRef(testGVR, namespace, name, c.deleteOption, c.owner) + + if 
!equality.Semantic.DeepEqual(owner, c.expectOwner) { + t.Errorf("Expect owner is %v, but got %v", c.expectOwner, owner) + } + }) + } +} diff --git a/pkg/spoke/spoketesting/manifestwork_helpers.go b/pkg/spoke/spoketesting/manifestwork_helpers.go index 80b0e899c..1e99606c3 100644 --- a/pkg/spoke/spoketesting/manifestwork_helpers.go +++ b/pkg/spoke/spoketesting/manifestwork_helpers.go @@ -70,8 +70,8 @@ func NewUnstructuredSecretBySize(namespace, name string, size int32) *unstructur } } -func NewUnstructuredSecret(namespace, name string, terminated bool, uid string) *unstructured.Unstructured { - u := NewUnstructured("v1", "Secret", namespace, name) +func NewUnstructuredSecret(namespace, name string, terminated bool, uid string, owners ...metav1.OwnerReference) *unstructured.Unstructured { + u := NewUnstructured("v1", "Secret", namespace, name, owners...) if terminated { now := metav1.Now() u.SetDeletionTimestamp(&now) @@ -82,8 +82,8 @@ func NewUnstructuredSecret(namespace, name string, terminated bool, uid string) return u } -func NewUnstructured(apiVersion, kind, namespace, name string) *unstructured.Unstructured { - return &unstructured.Unstructured{ +func NewUnstructured(apiVersion, kind, namespace, name string, owners ...metav1.OwnerReference) *unstructured.Unstructured { + u := &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": apiVersion, "kind": kind, @@ -93,6 +93,10 @@ func NewUnstructured(apiVersion, kind, namespace, name string) *unstructured.Uns }, }, } + + u.SetOwnerReferences(owners) + + return u } func NewUnstructuredWithContent( @@ -128,11 +132,12 @@ func NewManifestWork(index int, objects ...*unstructured.Unstructured) (*workapi return work, fmt.Sprintf("%s", work.Name) } -func NewAppliedManifestWork(hash string, index int) *workapiv1.AppliedManifestWork { +func NewAppliedManifestWork(hash string, index int, uid types.UID) *workapiv1.AppliedManifestWork { workName := fmt.Sprintf("work-%d", index) return &workapiv1.AppliedManifestWork{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s", hash, workName), + UID: uid, }, Spec: workapiv1.AppliedManifestWorkSpec{ HubHash: hash, diff --git a/test/e2e/bindata/bindata.go b/test/e2e/bindata/bindata.go index ccf024b2e..5ed6f83d0 100644 --- a/test/e2e/bindata/bindata.go +++ b/test/e2e/bindata/bindata.go @@ -196,7 +196,7 @@ rules: # Allow agent to create/delete namespaces, get/list are contained in admin role already - apiGroups: [""] resources: ["namespaces"] - verbs: ["create", "delete"] + verbs: ["create", "delete", "update"] # Allow agent to manage role/rolebinding/clusterrole/clusterrolebinding - apiGroups: ["rbac.authorization.k8s.io"] resources: ["clusterrolebindings", "rolebindings"] diff --git a/test/integration/util/assertion.go b/test/integration/util/assertion.go index 6af7f7115..b4db47b7d 100644 --- a/test/integration/util/assertion.go +++ b/test/integration/util/assertion.go @@ -114,21 +114,21 @@ func AssertFinalizerAdded(namespace, name string, workClient workclientset.Inter // check if all manifests are applied func AssertExistenceOfConfigMaps(manifests []workapiv1.Manifest, kubeClient kubernetes.Interface, eventuallyTimeout, eventuallyInterval int) { - gomega.Eventually(func() bool { + gomega.Eventually(func() error { for _, manifest := range manifests { expected := manifest.Object.(*corev1.ConfigMap) actual, err := kubeClient.CoreV1().ConfigMaps(expected.Namespace).Get(context.Background(), expected.Name, metav1.GetOptions{}) if err != nil { - return false + return err } if 
!reflect.DeepEqual(actual.Data, expected.Data) { - return false + return fmt.Errorf("configmap should be equal to %v, but got %v", expected.Data, actual.Data) } } - return true - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) } // check the existence of resource with GVR, namespace and name diff --git a/test/integration/work_test.go b/test/integration/work_test.go index d3fa81e94..a14f0a95e 100644 --- a/test/integration/work_test.go +++ b/test/integration/work_test.go @@ -572,21 +572,20 @@ var _ = ginkgo.Describe("ManifestWork", func() { }) ginkgo.JustBeforeEach(func() { + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + // Create another manifestworks with one shared resource. - // TODO We might not want the sharing in this cases, since the content of a resource in two manifestworks - // can be different. anotherWork = util.NewManifestWork(o.SpokeClusterName, "sharing-resource-work", []workapiv1.Manifest{manifests[0]}) anotherWork, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{}) anotherAppliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, anotherWork.Name) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) - ginkgo.It("shared resource between the manifestwork should be recreated when one manifestwork is deleted", func() { + ginkgo.It("shared resource between the manifestwork should be kept when one manifestwork is deleted", func() { // Ensure two manifestworks are all applied - util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, @@ -615,12 +614,12 @@ var _ = ginkgo.Describe("ManifestWork", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - anotherappliedmanifestwork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) + anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) if err != nil { return err } - for _, appliedResource := range anotherappliedmanifestwork.Status.AppliedResources { + for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { if 
appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) { return nil } @@ -633,15 +632,24 @@ var _ = ginkgo.Describe("ManifestWork", func() { err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - // Ensure the configmap is recreated and tracked by anotherappliedmanifestwork. + // Ensure the appliedmanifestwork of deleted manifestwork is removed so it won't try to delete shared resource + gomega.Eventually(func() bool { + _, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return true + } + return false + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Ensure the configmap is kept and tracked by anotherappliedmanifestwork. gomega.Eventually(func() error { - recreatedConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) if err != nil { return err } - if currentUID == recreatedConfigMap.UID { - return fmt.Errorf("UID should not be equal") + if currentUID != configMap.UID { + return fmt.Errorf("UID should be equal") } anotherappliedmanifestwork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) @@ -654,31 +662,16 @@ var _ = ginkgo.Describe("ManifestWork", func() { return fmt.Errorf("Resource Name should be cm1") } - if appliedResource.UID == string(currentUID) { - return fmt.Errorf("UID should not be equal") + if appliedResource.UID != string(currentUID) { + return fmt.Errorf("UID should be equal") } } return nil }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Ensure the appliedmanifestwork of deleted manifestwork is removed so it won't try to delete shared resource - gomega.Eventually(func() bool { - _, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return true - } - return false - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - }) - ginkgo.It("shared resource between the manifestwork should be recreated when the shared resource is removed from one manifestwork", func() { - // Ensure two manifestworks are all applied - util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + ginkgo.It("shared resource between the manifestworks should be kept when the shared resource is removed from one manifestwork", func() { util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, @@ -728,36
+721,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - // Ensure the configmap is recreated and tracked by anotherappliedmanifestwork - gomega.Eventually(func() error { - recreatedConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) - if err != nil { - return err - } - - if currentUID == recreatedConfigMap.UID { - return fmt.Errorf("UID should not be equal") - } - - anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { - if appliedResource.Name != "cm1" { - return fmt.Errorf("Resource Name should be cm1") - } - - if appliedResource.UID == string(currentUID) { - return fmt.Errorf("UID should not be equal") - } - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // ensure the resource is not tracked by the appliedmanifestwork. + // Ensure the resource is not tracked by the appliedmanifestwork. gomega.Eventually(func() bool { appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) if err != nil { @@ -772,6 +736,264 @@ var _ = ginkgo.Describe("ManifestWork", func() { return true }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Ensure the configmap is kept and tracked by anotherappliedmanifestwork + gomega.Eventually(func() error { + configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + if err != nil { + return err + } + + if currentUID != configMap.UID { + return fmt.Errorf("UID should be equal") + } + + anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { + if appliedResource.Name != "cm1" { + return fmt.Errorf("Resource Name should be cm1") + } + + if appliedResource.UID != string(currentUID) { + return fmt.Errorf("UID should be equal") + } + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + + }) + + ginkgo.Context("Delete options", func() { + ginkgo.BeforeEach(func() { + manifests = []workapiv1.Manifest{ + util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), + } + }) + + ginkgo.JustBeforeEach(func() { + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + }) + + ginkgo.It("Orphan deletion of the whole manifestwork", func() { + work, err = 
hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + work.Spec.DeleteOption = &workapiv1.DeleteOption{ + PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan, + } + + work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Ensure configmap exists + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + + // Ensure ownership of configmap is updated + gomega.Eventually(func() error { + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + + if err != nil { + return err + } + + if len(cm.OwnerReferences) != 0 { + return fmt.Errorf("Owner references are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) + + if err != nil { + return err + } + + if len(cm.OwnerReferences) != 0 { + return fmt.Errorf("Owner references are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Delete the work + err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Wait for deletion of manifest work + gomega.Eventually(func() bool { + _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if !errors.IsNotFound(err) { + return false + } + + return true + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Ensure configmap exists + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + }) + + ginkgo.It("Selectively Orphan deletion of the manifestwork", func() { + work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + work.Spec.DeleteOption = &workapiv1.DeleteOption{ + PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan, + SelectivelyOrphan: &workapiv1.SelectivelyOrphan{ + OrphaningRules: []workapiv1.OrphaningRule{ + { + Group: "", + Resource: "configmaps", + Namespace: o.SpokeClusterName, + Name: "cm1", + }, + }, + }, + } + + work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Ensure configmap exists + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + + // Ensure ownership of configmap is updated + gomega.Eventually(func() error { + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + + if err != nil { + return err + } + + if len(cm.OwnerReferences) != 0 { + return fmt.Errorf("Owner references are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) + } + + return nil + }, eventuallyTimeout,
eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Delete the work + err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Wait for deletion of manifest work + gomega.Eventually(func() bool { + _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if !errors.IsNotFound(err) { + return false + } + + return true + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // One of the resources should be deleted. + _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) + gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) + + // One of the resources should be kept + _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.It("Clean the resource when orphan deletion option is removed", func() { + work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + work.Spec.DeleteOption = &workapiv1.DeleteOption{ + PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan, + SelectivelyOrphan: &workapiv1.SelectivelyOrphan{ + OrphaningRules: []workapiv1.OrphaningRule{ + { + Group: "", + Resource: "configmaps", + Namespace: o.SpokeClusterName, + Name: "cm1", + }, + }, + }, + } + + work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Ensure configmap exists + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + + // Ensure ownership of configmap is updated + gomega.Eventually(func() error { + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + + if err != nil { + return err + } + + if len(cm.OwnerReferences) != 0 { + return fmt.Errorf("Owner references are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Remove the delete option + gomega.Eventually(func() error { + work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + + if err != nil { + return err + } + + work.Spec.DeleteOption = nil + work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Ensure ownership of configmap is updated + gomega.Eventually(func() error { + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + + if err != nil { + return err + } + + if len(cm.OwnerReferences) != 1 { + return fmt.Errorf("Owner references are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Delete the work + err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name,
metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Wait for deletion of manifest work + gomega.Eventually(func() bool { + _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if !errors.IsNotFound(err) { + return false + } + + return true + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // All of the resource should be deleted. + _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) + gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) + _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) }) }) }) diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml index 95fe8dfd9..8c60875d8 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml @@ -82,13 +82,10 @@ spec: type: array items: type: object - required: - - domainPatterns properties: domainPatterns: - description: "domainPatterns is a list of domains for which the desired HSTS annotations are required. If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy. \n The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*." + description: "domainPatterns is a list of domains for which the desired HSTS annotations are required. If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy. \n The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*. kubebuilder:validation:MinLength=1" type: array - minItems: 1 items: type: string includeSubDomainsPolicy: @@ -103,17 +100,13 @@ spec: type: object properties: largestMaxAge: - description: The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age This value can be left unspecified, in which case no upper limit is enforced. + description: The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age This value can be left unspecified, in which case no upper limit is enforced. kubebuilder:validation:minimum=0:maximum=2147483647 type: integer format: int32 - maximum: 2147483647 - minimum: 0 smallestMaxAge: - description: The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary tool for administrators to quickly correct mistakes. This value can be left unspecified, in which case no lower limit is enforced. + description: The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary tool for administrators to quickly correct mistakes. 
This value can be left unspecified, in which case no lower limit is enforced. kubebuilder:validation:minimum=0:maximum=2147483647 type: integer format: int32 - maximum: 2147483647 - minimum: 0 namespaceSelector: description: namespaceSelector specifies a label selector such that the policy applies only to those routes that are in namespaces with labels that match the selector, and are in one of the DomainPatterns. Defaults to the empty LabelSelector, which matches everything. type: object diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go index 56d00648e..4986c44ee 100644 --- a/vendor/github.com/openshift/api/config/v1/types.go +++ b/vendor/github.com/openshift/api/config/v1/types.go @@ -323,8 +323,7 @@ type RequiredHSTSPolicy struct { // // The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. // foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*. - // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:Required + // kubebuilder:validation:MinLength=1 // +required DomainPatterns []string `json:"domainPatterns"` @@ -333,6 +332,7 @@ type RequiredHSTSPolicy struct { // If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. // maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS // policy will eventually expire on that client. + // +required MaxAge MaxAgePolicy `json:"maxAge"` // preloadPolicy directs the client to include hosts in its host preload list so that @@ -355,16 +355,14 @@ type RequiredHSTSPolicy struct { type MaxAgePolicy struct { // The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age // This value can be left unspecified, in which case no upper limit is enforced. - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Maximum=2147483647 + // kubebuilder:validation:minimum=0:maximum=2147483647 LargestMaxAge *int32 `json:"largestMaxAge,omitempty"` // The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age // Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary // tool for administrators to quickly correct mistakes. // This value can be left unspecified, in which case no lower limit is enforced. - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Maximum=2147483647 + // kubebuilder:validation:minimum=0:maximum=2147483647 SmallestMaxAge *int32 `json:"smallestMaxAge,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 8287ce6e1..4cb45be7f 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -183,8 +183,8 @@ func (LeaderElection) SwaggerDoc() map[string]string { var map_MaxAgePolicy = map[string]string{ "": "MaxAgePolicy contains a numeric range for specifying a compliant HSTS max-age for the enclosing RequiredHSTSPolicy", - "largestMaxAge": "The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age This value can be left unspecified, in which case no upper limit is enforced.", - "smallestMaxAge": "The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age Setting max-age=0 allows the deletion of an existing HSTS header from a host. 
This is a necessary tool for administrators to quickly correct mistakes. This value can be left unspecified, in which case no lower limit is enforced.", + "largestMaxAge": "The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age This value can be left unspecified, in which case no upper limit is enforced. kubebuilder:validation:minimum=0:maximum=2147483647", + "smallestMaxAge": "The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary tool for administrators to quickly correct mistakes. This value can be left unspecified, in which case no lower limit is enforced. kubebuilder:validation:minimum=0:maximum=2147483647", } func (MaxAgePolicy) SwaggerDoc() map[string]string { @@ -212,7 +212,7 @@ func (RemoteConnectionInfo) SwaggerDoc() map[string]string { var map_RequiredHSTSPolicy = map[string]string{ "namespaceSelector": "namespaceSelector specifies a label selector such that the policy applies only to those routes that are in namespaces with labels that match the selector, and are in one of the DomainPatterns. Defaults to the empty LabelSelector, which matches everything.", - "domainPatterns": "domainPatterns is a list of domains for which the desired HSTS annotations are required. If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy.\n\nThe use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*.", + "domainPatterns": "domainPatterns is a list of domains for which the desired HSTS annotations are required. If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy.\n\nThe use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*. kubebuilder:validation:MinLength=1", "maxAge": "maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. If set to 0, it negates the effect, and hosts are removed as HSTS hosts. If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS policy will eventually expire on that client.", "preloadPolicy": "preloadPolicy directs the client to include hosts in its host preload list so that it never needs to do an initial load to get the HSTS header (note that this is not defined in RFC 6797 and is therefore client implementation-dependent).", "includeSubDomainsPolicy": "includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host's domain name. 
Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com", diff --git a/vendor/github.com/openshift/api/go.mod b/vendor/github.com/openshift/api/go.mod index 3eae32c0e..4256da151 100644 --- a/vendor/github.com/openshift/api/go.mod +++ b/vendor/github.com/openshift/api/go.mod @@ -9,10 +9,9 @@ require ( github.com/openshift/build-machinery-go v0.0.0-20210712174854-1bb7fd1518d3 github.com/spf13/pflag v1.0.5 golang.org/x/tools v0.1.2 - k8s.io/api v0.22.1 - k8s.io/apimachinery v0.22.1 - k8s.io/code-generator v0.22.1 + k8s.io/api v0.22.0-rc.0 + k8s.io/apimachinery v0.22.0-rc.0 + k8s.io/code-generator v0.22.0-rc.0 k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 k8s.io/klog/v2 v2.9.0 - sigs.k8s.io/yaml v1.2.0 ) diff --git a/vendor/github.com/openshift/api/go.sum b/vendor/github.com/openshift/api/go.sum index cf5ee2857..0528f025d 100644 --- a/vendor/github.com/openshift/api/go.sum +++ b/vendor/github.com/openshift/api/go.sum @@ -250,12 +250,12 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY= -k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= -k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM= -k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= -k8s.io/code-generator v0.22.1 h1:zAcKpn+xe9Iyc4qtZlfg4tD0f+SO2h5+e/s4pZPOVhs= -k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= +k8s.io/api v0.22.0-rc.0 h1:LcnCE0nmb2CVpvmlbHkIzjZUHcVpSoNcn8mJkIo4FoQ= +k8s.io/api v0.22.0-rc.0/go.mod h1:EUcKB6RvpW74HMRUSSNwpUzrIHBdGT1FeAvOV+txic0= +k8s.io/apimachinery v0.22.0-rc.0 h1:boMGWXiuYJl4sAEMTEyWJtX4VLEPf0LZ0nUh+vNALIg= +k8s.io/apimachinery v0.22.0-rc.0/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/code-generator v0.22.0-rc.0 h1:8ZPtFa3yhlV5mz8DpLZYe7FetNH4qtZGkrDnkl2G1MU= +k8s.io/code-generator v0.22.0-rc.0/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= diff --git a/vendor/modules.txt b/vendor/modules.txt index e58bba8d2..3ff3a214f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -144,7 +144,7 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types -# github.com/openshift/api v0.0.0-20210831091943-07e756545ac1 +# github.com/openshift/api v0.0.0-20210730095913-85e1d547cdee github.com/openshift/api github.com/openshift/api/apiserver github.com/openshift/api/apiserver/v1 @@ -216,7 +216,7 @@ 
github.com/openshift/build-machinery-go/scripts github.com/openshift/generic-admission-server/pkg/apiserver github.com/openshift/generic-admission-server/pkg/cmd/server github.com/openshift/generic-admission-server/pkg/registry/admissionreview -# github.com/openshift/library-go v0.0.0-20210902020120-5ddb355aea6f +# github.com/openshift/library-go v0.0.0-20210902020120-5ddb355aea6f => github.com/qiujian16/library-go v0.0.0-20210830020149-91704dd0762a ## explicit github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer github.com/openshift/library-go/pkg/config/client @@ -1120,3 +1120,4 @@ sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.2.0 sigs.k8s.io/yaml +# github.com/openshift/library-go => github.com/qiujian16/library-go v0.0.0-20210830020149-91704dd0762a