Merge pull request #19 from skeeey/adding-integration-test

add integration test cases
This commit is contained in:
OpenShift Merge Robot
2020-05-21 21:26:25 +02:00
committed by GitHub
11 changed files with 1100 additions and 54 deletions

20
go.sum
View File

@@ -84,7 +84,6 @@ github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -160,7 +159,6 @@ github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -181,7 +179,6 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -195,9 +192,7 @@ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk=
github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
@@ -219,7 +214,6 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
@@ -283,7 +277,6 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34=
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
@@ -379,7 +372,6 @@ go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -420,13 +412,11 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -461,7 +451,6 @@ golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -496,14 +485,12 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2El
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -530,29 +517,23 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.18.0 h1:lwYk8Vt7rsVTwjRU6pzEsa9YNhThbmbocQlKvNBB4EQ=
k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8=
k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
k8s.io/apiextensions-apiserver v0.18.0 h1:HN4/P8vpGZFvB5SOMuPPH2Wt9Y/ryX+KRvIyAkchu1Q=
k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8=
k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY=
k8s.io/apimachinery v0.18.0 h1:fuPfYpk3cs1Okp/515pAf0dNhL66+8zk8RLbSX+EgAE=
k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA=
k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/apiserver v0.18.0 h1:ELAWpGWC6XdbRLi5lwAbEbvksD7hkXxPdxaJsdpist4=
k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
k8s.io/apiserver v0.18.2 h1:fwKxdTWwwYhxvtjo0UUfX+/fsitsNtfErPNegH2x9ic=
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
k8s.io/client-go v0.18.0 h1:yqKw4cTUQraZK3fcVCMeSa+lqKwcjZ5wtcOIPnxQno4=
k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8=
k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE=
k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/component-base v0.18.0 h1:I+lP0fNfsEdTDpHaL61bCAqTZLoiWjEEP304Mo5ZQgE=
k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
k8s.io/component-base v0.18.2 h1:SJweNZAGcUvsypLGNPNGeJ9UgPZQ6+bW+gEHe8uyh/Y=
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
@@ -566,7 +547,6 @@ k8s.io/kube-aggregator v0.18.0 h1:J+wa9FDQ3SbgyA8wQBNg2m2FMSm+mMQfs2A58500hs0=
k8s.io/kube-aggregator v0.18.0/go.mod h1:ateewQ5QbjMZF/dihEFXwaEwoA4v/mayRvzfmvb6eqI=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200327001022-6496210b90e8 h1:6JFbaLjRyBz8K2Jvt+pcT+N3vvwMZfg8MfVENwe9aag=
k8s.io/utils v0.0.0-20200327001022-6496210b90e8/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=

View File

@@ -134,7 +134,10 @@ func isSpokeClusterClientCertRenewal(csr *certificatesv1beta1.CertificateSigning
return false
}
if csr.Spec.SignerName == nil || *csr.Spec.SignerName != certificatesv1beta1.KubeAPIServerClientSignerName {
// The CSR signer name must be provided on Kubernetes v1.18.0 and above, so if the signer name is empty,
// we should be on an old server, we skip the signer name check
if (csr.Spec.SignerName != nil && len(*csr.Spec.SignerName) != 0) &&
*csr.Spec.SignerName != certificatesv1beta1.KubeAPIServerClientSignerName {
return false
}

View File

@@ -119,6 +119,8 @@ func TestSync(t *testing.T) {
}
func TestIsSpokeClusterClientCertRenewal(t *testing.T) {
invalidSignerName := "invalidsigner"
cases := []struct {
name string
csr *certificatesv1beta1.CertificateSigningRequest
@@ -131,7 +133,7 @@ func TestIsSpokeClusterClientCertRenewal(t *testing.T) {
},
{
name: "an invalid signer name",
csr: newCSR(labels, nil, "", []string{}, "", ""),
csr: newCSR(labels, &invalidSignerName, "", []string{}, "", ""),
isRenewal: false,
},
{
@@ -159,6 +161,11 @@ func TestIsSpokeClusterClientCertRenewal(t *testing.T) {
csr: newInvalidCSR(),
isRenewal: false,
},
{
name: "a renewal csr without signer name",
csr: newCSRWithSignerName(nil),
isRenewal: true,
},
{
name: "a renewal csr",
csr: newRenewalCSR(),
@@ -207,10 +214,10 @@ func newCSR(labels map[string]string, signerName *string, cn string, orgs []stri
}
}
func newRenewalCSR() *certificatesv1beta1.CertificateSigningRequest {
func newCSRWithSignerName(signer *string) *certificatesv1beta1.CertificateSigningRequest {
csr := newCSR(
labels,
&signerName,
signer,
"system:open-cluster-management:spokecluster1:spokeagent1",
[]string{"system:open-cluster-management:spokecluster1"},
"system:open-cluster-management:spokecluster1:spokeagent1",
@@ -220,6 +227,10 @@ func newRenewalCSR() *certificatesv1beta1.CertificateSigningRequest {
return csr
}
func newRenewalCSR() *certificatesv1beta1.CertificateSigningRequest {
return newCSRWithSignerName(&signerName)
}
func newInvalidCSR() *certificatesv1beta1.CertificateSigningRequest {
csr := newCSR(
labels,

View File

@@ -41,6 +41,9 @@ const (
AgentNameFile = "agent-name"
)
// ControllerSyncInterval is exposed so that integration tests can crank up the controller sync speed.
var ControllerSyncInterval = 5 * time.Minute
// ClientCertForHubController maintains the client cert and kubeconfig for hub
type ClientCertForHubController struct {
clusterName string
@@ -97,7 +100,7 @@ func NewClientCertForHubController(
return factory.New().
WithInformers(hubCSRInformer.Informer(), spokeSecretInformer.Informer()).
WithSync(c.sync).
ResyncEvery(5*time.Minute).
ResyncEvery(ControllerSyncInterval).
ToController(controllerName, recorder)
}
@@ -241,14 +244,15 @@ func (c *ClientCertForHubController) syncCSR(secret *corev1.Secret) (map[string]
// create a kubeconfig with references to the key/cert files in kubeconfigSecret if it does not exist.
// So other components deployed in separated deployments are able to access this kubeconfig for hub as
// well by sharing the secret
if _, ok := secret.Data[KubeconfigFile]; !ok {
kubeconfigData, ok := secret.Data[KubeconfigFile]
if !ok {
kubeconfig := buildKubeconfig(restclient.CopyConfig(c.hubClientConfig), TLSCertFile, TLSKeyFile)
kubeconfigData, err := clientcmd.Write(kubeconfig)
kubeconfigData, err = clientcmd.Write(kubeconfig)
if err != nil {
return nil, err
}
data[KubeconfigFile] = kubeconfigData
}
data[KubeconfigFile] = kubeconfigData
// clear the csr name and private key
c.reset()

View File

@@ -18,6 +18,9 @@ import (
// well-known anonymous user
const anonymous = "system:anonymous"
// CreatingControllerSyncInterval is exposed so that integration tests can crank up the controller sync speed.
var CreatingControllerSyncInterval = 60 * time.Minute
// spokeClusterCreatingController creates a spoke cluster on hub cluster during the spoke agent bootstrap phase
type spokeClusterCreatingController struct {
clusterName string
@@ -40,7 +43,7 @@ func NewSpokeClusterCreatingController(
}
return factory.New().
WithSync(c.sync).
ResyncEvery(60*time.Minute).
ResyncEvery(CreatingControllerSyncInterval).
ToController("SpokeClusterCreatingController", recorder)
}

View File

@@ -0,0 +1,84 @@
package integration_test
import (
"context"
"path"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"github.com/open-cluster-management/registration/pkg/spoke"
"github.com/open-cluster-management/registration/test/integration/util"
"github.com/openshift/library-go/pkg/controller/controllercmd"
)
// Certificate rotation scenario: bootstrap a spoke agent against the hub,
// approve its CSR with a deliberately short-lived (20s) client certificate,
// and verify the agent requests a renewal CSR that the hub controller
// auto-approves before the certificate expires.
var _ = ginkgo.Describe("Certificate Rotation", func() {
	ginkgo.It("Certificate should be automatically rotated when it is about to expire", func() {
		const clusterName = "rotationtest-spokecluster"
		const secretName = "rotationtest-hub-kubeconfig-secret"
		kubeconfigDir := path.Join(util.TestDir, "rotationtest", "hub-kubeconfig")

		// Start the registration agent in the background; it keeps running
		// for the duration of the test.
		go func() {
			opts := spoke.SpokeAgentOptions{
				ClusterName:         clusterName,
				BootstrapKubeconfig: bootstrapKubeConfigFile,
				HubKubeconfigSecret: secretName,
				HubKubeconfigDir:    kubeconfigDir,
			}
			runErr := opts.RunSpokeAgent(context.Background(), &controllercmd.ControllerContext{
				KubeConfig:    spokeCfg,
				EventRecorder: util.NewIntegrationTestEventRecorder("rotationtest"),
			})
			gomega.Expect(runErr).NotTo(gomega.HaveOccurred())
		}()

		// Bootstrapping should produce both a SpokeCluster and an unapproved CSR.
		gomega.Eventually(func() bool {
			_, err := util.GetSpokeCluster(clusterClient, clusterName)
			return err == nil
		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
		gomega.Eventually(func() bool {
			_, err := util.FindUnapprovedSpokeCSR(kubeClient, clusterName)
			return err == nil
		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

		// Act as the hub admin: approve the CSR with a 20-second certificate so
		// rotation triggers quickly, then accept the spoke cluster.
		err := util.ApproveSpokeClusterCSR(kubeClient, clusterName, time.Second*20)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		err = util.AcceptSpokeCluster(clusterClient, clusterName)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())

		// Once the CSR is approved, the agent fills the hub kubeconfig secret.
		gomega.Eventually(func() bool {
			_, err := util.GetFilledHubKubeConfigSecret(kubeClient, testNamespace, secretName)
			return err == nil
		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

		// Simulate kubelet mounting the hub kubeconfig secret into the agent's dir.
		err = util.MountHubKubeConfigs(kubeClient, kubeconfigDir, testNamespace, secretName)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())

		// The short-lived certificate forces the agent to create a renewal CSR,
		// which the hub controller should approve automatically.
		gomega.Eventually(func() bool {
			_, err := util.FindAutoApprovedSpokeCSR(kubeClient, clusterName)
			return err == nil
		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
	})
})

6
test/integration/doc.go Normal file
View File

@@ -0,0 +1,6 @@
// Package integration provides integration tests for open-cluster-management registration. The test cases include
// - the spoke cluster joining process
// - the spoke registration agent rotating its certificate when it is about to expire
// - the spoke registration agent recovering from an invalid bootstrap kubeconfig
// - the spoke registration agent recovering from an invalid hub kubeconfig
package integration

View File

@@ -1,62 +1,147 @@
package integration_test
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"github.com/openshift/library-go/pkg/controller/controllercmd"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/transport"
clusterclientset "github.com/open-cluster-management/api/client/cluster/clientset/versioned"
clusterv1 "github.com/open-cluster-management/api/cluster/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/open-cluster-management/registration/pkg/hub"
"github.com/open-cluster-management/registration/pkg/spoke/hubclientcert"
"github.com/open-cluster-management/registration/pkg/spoke/spokecluster"
"github.com/open-cluster-management/registration/test/integration/util"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
)
var cfg *rest.Config
const (
eventuallyTimeout = 30 // seconds
eventuallyInterval = 1 // seconds
)
var spokeCfg *rest.Config
var bootstrapKubeConfigFile string
var testEnv *envtest.Environment
var k8sClient client.Client
var securePort int
var kubeClient kubernetes.Interface
var clusterClient clusterclientset.Interface
var testNamespace string
func TestIntegration(t *testing.T) {
RegisterFailHandler(Fail)
// TODO test cases
// - spoke registration agent creates CSR, hub authorizes, spoke agent creates hub kubeconfig and connects back to hub for successful join
// - spoke registration agent recovery from invalid bootstrap kubeconfig
// - spoke registration agent recovery from invalid hub kubeconfig
// - spoke registration rotate its certificate after its certificate is expired
RunSpecsWithDefaultAndCustomReporters(t, "Integration Suite", []Reporter{printer.NewlineReporter{}})
gomega.RegisterFailHandler(ginkgo.Fail)
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Integration Suite", []ginkgo.Reporter{printer.NewlineReporter{}})
}
var _ = BeforeSuite(func(done Done) {
logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
var _ = ginkgo.BeforeSuite(func(done ginkgo.Done) {
logf.SetLogger(zap.LoggerTo(ginkgo.GinkgoWriter, true))
By("bootstrapping test environment")
ginkgo.By("bootstrapping test environment")
var err error
// crank up the sync speed
transport.CertCallbackRefreshDuration = 5 * time.Second
hubclientcert.ControllerSyncInterval = 5 * time.Second
spokecluster.CreatingControllerSyncInterval = 1 * time.Second
// install cluster CRD and start a local kube-apiserver
err = util.GenerateSelfSignedCertKey()
gomega.Expect(err).ToNot(gomega.HaveOccurred())
apiServerFlags := append([]string{}, envtest.DefaultKubeAPIServerFlags...)
apiServerFlags = append(apiServerFlags,
fmt.Sprintf("--client-ca-file=%s", util.CAFile),
fmt.Sprintf("--tls-cert-file=%s", util.ServerCertFile),
fmt.Sprintf("--tls-private-key-file=%s", util.ServerKeyFile),
)
testEnv = &envtest.Environment{
ErrorIfCRDPathMissing: true,
CRDDirectoryPaths: []string{
filepath.Join("..", "..", "vendor", "github.com", "open-cluster-management", "api", "cluster", "v1"),
filepath.Join(".", "vendor", "github.com", "open-cluster-management", "api", "cluster", "v1"),
},
KubeAPIServerFlags: apiServerFlags,
}
cfg, err := testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Expect(cfg).ToNot(gomega.BeNil())
err = clusterv1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// prepare configs
securePort = testEnv.ControlPlane.APIServer.SecurePort
gomega.Expect(securePort).ToNot(gomega.BeZero())
spokeCfg, err = util.CreateSpokeKubeConfig(cfg, securePort)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(spokeCfg).ToNot(gomega.BeNil())
bootstrapKubeConfigFile = path.Join(util.TestDir, "bootstrap", "kubeconfig")
err = util.CreateBootstrapKubeConfig(bootstrapKubeConfigFile, securePort)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// prepare clients
kubeClient, err = kubernetes.NewForConfig(cfg)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Expect(kubeClient).ToNot(gomega.BeNil())
clusterClient, err = clusterclientset.NewForConfig(cfg)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Expect(clusterClient).ToNot(gomega.BeNil())
// prepare test namespace
nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
if err != nil {
testNamespace = "open-cluster-management"
} else {
testNamespace = string(nsBytes)
}
err = util.PrepareSpokeAgentNamespace(kubeClient, testNamespace)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// start hub controller
go func() {
err := hub.RunControllerManager(context.Background(), &controllercmd.ControllerContext{
KubeConfig: cfg,
EventRecorder: util.NewIntegrationTestEventRecorder("hub"),
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
var _ = ginkgo.AfterSuite(func() {
ginkgo.By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
gomega.Expect(err).ToNot(gomega.HaveOccurred())
err = os.RemoveAll(util.TestDir)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

View File

@@ -0,0 +1,214 @@
package integration_test
import (
"context"
"path"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
clusterv1 "github.com/open-cluster-management/api/cluster/v1"
"github.com/open-cluster-management/registration/pkg/helpers"
"github.com/open-cluster-management/registration/pkg/spoke"
"github.com/open-cluster-management/registration/test/integration/util"
"github.com/openshift/library-go/pkg/controller/controllercmd"
"k8s.io/apimachinery/pkg/api/errors"
)
// Agent Recovery covers two spoke-agent failure scenarios:
//  1. the agent starts with an invalid (expired-cert) bootstrap kubeconfig and
//     must begin bootstrapping once the kubeconfig file is repaired in place;
//  2. the agent's first CSR is approved with an already-expired hub client
//     certificate and the agent must fall back to bootstrapping again with a
//     brand-new CSR.
var _ = ginkgo.Describe("Agent Recovery", func() {
ginkgo.It("agent recovery from invalid bootstrap kubeconfig", func() {
var err error
spokeClusterName := "bootstrap-recoverytest-spokecluster"
hubKubeconfigSecret := "bootstrap-recoverytest-hub-kubeconfig-secret"
hubKubeconfigDir := path.Join(util.TestDir, "bootstrap-recoverytest", "hub-kubeconfig")
bootstrapFile := path.Join(util.TestDir, "bootstrap-recoverytest", "kubeconfig")
// create an INVALID bootstrap kubeconfig file with an expired cert (age -1h)
err = util.CreateBootstrapKubeConfigWithCertAge(bootstrapFile, securePort, -1*time.Hour)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// run registration agent with an invalid bootstrap kubeconfig
go func() {
agentOptions := spoke.SpokeAgentOptions{
ClusterName: spokeClusterName,
BootstrapKubeconfig: bootstrapFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
}
// RunSpokeAgent blocks for the lifetime of the test process.
err := agentOptions.RunSpokeAgent(context.Background(), &controllercmd.ControllerContext{
KubeConfig: spokeCfg,
EventRecorder: util.NewIntegrationTestEventRecorder("bootstrap-recoverytest"),
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
// the spokecluster should not be created.
// The poll body asserts NotFound on every attempt (a successful lookup would
// fail the test immediately) and counts attempts; requiring >= 3 polls gives
// the broken agent several sync intervals to misbehave before we conclude it
// correctly created nothing.
retryToGetSpokeClusterTimes := 0
gomega.Eventually(func() int {
_, err = util.GetSpokeCluster(clusterClient, spokeClusterName)
gomega.Expect(err).To(gomega.HaveOccurred())
gomega.Expect(errors.IsNotFound(err)).Should(gomega.BeTrue())
retryToGetSpokeClusterTimes = retryToGetSpokeClusterTimes + 1
return retryToGetSpokeClusterTimes
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNumerically(">=", 3))
// the csr should not be created (same negative-polling pattern as above)
retryToGetSpokeCSRTimes := 0
gomega.Eventually(func() int {
_, err := util.FindUnapprovedSpokeCSR(kubeClient, spokeClusterName)
gomega.Expect(err).To(gomega.HaveOccurred())
retryToGetSpokeCSRTimes = retryToGetSpokeCSRTimes + 1
return retryToGetSpokeCSRTimes
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNumerically(">=", 3))
// recover the invalid bootstrap kubeconfig file by rewriting it in place
// with a certificate that is valid for the next 24 hours
err = util.CreateBootstrapKubeConfigWithCertAge(bootstrapFile, securePort, 24*time.Hour)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// the csr should be created after the bootstrap kubeconfig was recovered
gomega.Eventually(func() bool {
if _, err := util.FindUnapprovedSpokeCSR(kubeClient, spokeClusterName); err != nil {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// the spoke cluster should be created after the bootstrap kubeconfig was recovered
gomega.Eventually(func() bool {
if _, err := util.GetSpokeCluster(clusterClient, spokeClusterName); err != nil {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// simulate hub cluster admin accept the spoke cluster and approve the csr
err = util.AcceptSpokeCluster(clusterClient, spokeClusterName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = util.ApproveSpokeClusterCSR(kubeClient, spokeClusterName, time.Hour*24)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// the hub kubeconfig secret should be filled after the csr is approved
gomega.Eventually(func() bool {
if _, err := util.GetFilledHubKubeConfigSecret(kubeClient, testNamespace, hubKubeconfigSecret); err != nil {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// simulate k8s to mount the hub kubeconfig secret
err = util.MountHubKubeConfigs(kubeClient, hubKubeconfigDir, testNamespace, hubKubeconfigSecret)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// the spoke cluster should have joined condition finally
gomega.Eventually(func() bool {
spokeCluster, err := util.GetSpokeCluster(clusterClient, spokeClusterName)
if err != nil {
return false
}
joined := helpers.FindSpokeClusterCondition(spokeCluster.Status.Conditions, clusterv1.SpokeClusterConditionJoined)
if joined == nil {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
})
ginkgo.It("agent recovery from invalid hub kubeconfig", func() {
var err error
spokeClusterName := "hubkubeconfig-recoverytest-spokecluster"
hubKubeconfigSecret := "hubkubeconfig-recoverytest-hub-kubeconfig-secret"
hubKubeconfigDir := path.Join(util.TestDir, "hubkubeconfig-recoverytest", "hub-kubeconfig")
// run registration agent (with a VALID shared bootstrap kubeconfig this time)
go func() {
agentOptions := spoke.SpokeAgentOptions{
ClusterName: spokeClusterName,
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err := agentOptions.RunSpokeAgent(context.Background(), &controllercmd.ControllerContext{
KubeConfig: spokeCfg,
EventRecorder: util.NewIntegrationTestEventRecorder("hubkubeconfig-recoverytest"),
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
// after bootstrap the spokecluster and csr should be created
gomega.Eventually(func() bool {
if _, err := util.GetSpokeCluster(clusterClient, spokeClusterName); err != nil {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// remember the first CSR's name so we can later prove the agent created a
// second, distinct CSR when forced to re-bootstrap
var firstCSRName string
gomega.Eventually(func() bool {
csr, err := util.FindUnapprovedSpokeCSR(kubeClient, spokeClusterName)
if err != nil {
return false
}
firstCSRName = csr.Name
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// simulate hub cluster admin accept the spoke cluster
err = util.AcceptSpokeCluster(clusterClient, spokeClusterName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// simulate hub cluster admin approve the csr with an INVALID hub config
// (the issued client certificate is already expired)
err = util.ApproveSpokeClusterCSRWithExpiredCert(kubeClient, spokeClusterName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// agent should bootstrap again due to the invalid hub config
var secondCSRName string
gomega.Eventually(func() bool {
csr, err := util.FindUnapprovedSpokeCSR(kubeClient, spokeClusterName)
if err != nil {
return false
}
secondCSRName = csr.Name
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// a new csr should be recreated
gomega.Expect(firstCSRName).ShouldNot(gomega.BeEquivalentTo(secondCSRName))
// approve the new csr, this time with a certificate valid for 24 hours
err = util.ApproveSpokeClusterCSR(kubeClient, spokeClusterName, time.Hour*24)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// the hub kubeconfig secret should be filled after the csr is approved
gomega.Eventually(func() bool {
if _, err := util.GetFilledHubKubeConfigSecret(kubeClient, testNamespace, hubKubeconfigSecret); err != nil {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// simulate k8s to mount the hub kubeconfig secret
err = util.MountHubKubeConfigs(kubeClient, hubKubeconfigDir, testNamespace, hubKubeconfigSecret)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// the spoke cluster should have joined condition finally
gomega.Eventually(func() bool {
spokeCluster, err := util.GetSpokeCluster(clusterClient, spokeClusterName)
if err != nil {
return false
}
joined := helpers.FindSpokeClusterCondition(spokeCluster.Status.Conditions, clusterv1.SpokeClusterConditionJoined)
if joined == nil {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
})
})

View File

@@ -0,0 +1,115 @@
package integration_test
import (
"context"
"path"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
clusterv1 "github.com/open-cluster-management/api/cluster/v1"
"github.com/open-cluster-management/registration/pkg/helpers"
"github.com/open-cluster-management/registration/pkg/spoke"
"github.com/open-cluster-management/registration/test/integration/util"
"github.com/openshift/library-go/pkg/controller/controllercmd"
)
var _ = ginkgo.Describe("Joining Process", func() {
	ginkgo.It("spokecluster should join successfully", func() {
		var err error

		spokeClusterName := "joiningtest-spokecluster"
		hubKubeconfigSecret := "joiningtest-hub-kubeconfig-secret"
		hubKubeconfigDir := path.Join(util.TestDir, "joiningtest", "hub-kubeconfig")

		// start the registration agent in the background
		go func() {
			agentOptions := spoke.SpokeAgentOptions{
				ClusterName:         spokeClusterName,
				BootstrapKubeconfig: bootstrapKubeConfigFile,
				HubKubeconfigSecret: hubKubeconfigSecret,
				HubKubeconfigDir:    hubKubeconfigDir,
			}
			runErr := agentOptions.RunSpokeAgent(context.Background(), &controllercmd.ControllerContext{
				KubeConfig:    spokeCfg,
				EventRecorder: util.NewIntegrationTestEventRecorder("joiningtest"),
			})
			gomega.Expect(runErr).NotTo(gomega.HaveOccurred())
		}()

		// bootstrapping should create the spoke cluster and a csr on the hub
		gomega.Eventually(func() bool {
			_, err := util.GetSpokeCluster(clusterClient, spokeClusterName)
			return err == nil
		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
		gomega.Eventually(func() bool {
			_, err := util.FindUnapprovedSpokeCSR(kubeClient, spokeClusterName)
			return err == nil
		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

		// the hub controller should add its cleanup finalizer to the cluster
		gomega.Eventually(func() bool {
			spokeCluster, err := util.GetSpokeCluster(clusterClient, spokeClusterName)
			if err != nil {
				return false
			}
			return len(spokeCluster.Finalizers) == 1 &&
				spokeCluster.Finalizers[0] == "cluster.open-cluster-management.io/api-resource-cleanup"
		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

		// act as the hub admin: accept the spoke cluster and approve its csr
		err = util.AcceptSpokeCluster(clusterClient, spokeClusterName)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		err = util.ApproveSpokeClusterCSR(kubeClient, spokeClusterName, time.Hour*24)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())

		// the accepted condition should appear on the spoke cluster
		gomega.Eventually(func() bool {
			spokeCluster, err := util.GetSpokeCluster(clusterClient, spokeClusterName)
			if err != nil {
				return false
			}
			return helpers.FindSpokeClusterCondition(spokeCluster.Status.Conditions, clusterv1.SpokeClusterConditionHubAccepted) != nil
		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

		// once approved, the agent should fill in the hub kubeconfig secret
		gomega.Eventually(func() bool {
			_, err := util.GetFilledHubKubeConfigSecret(kubeClient, testNamespace, hubKubeconfigSecret)
			return err == nil
		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

		// emulate kubernetes mounting the secret into the agent's config dir
		err = util.MountHubKubeConfigs(kubeClient, hubKubeconfigDir, testNamespace, hubKubeconfigSecret)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())

		// the spoke cluster should eventually report the joined condition
		gomega.Eventually(func() bool {
			spokeCluster, err := util.GetSpokeCluster(clusterClient, spokeClusterName)
			if err != nil {
				return false
			}
			return helpers.FindSpokeClusterCondition(spokeCluster.Status.Conditions, clusterv1.SpokeClusterConditionJoined) != nil
		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
	})
})

View File

@@ -0,0 +1,541 @@
package util
import (
"bytes"
"context"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io/ioutil"
"math/big"
"net"
"os"
"path"
"time"
"github.com/onsi/ginkgo"
clusterclientset "github.com/open-cluster-management/api/client/cluster/clientset/versioned"
clusterv1 "github.com/open-cluster-management/api/cluster/v1"
"github.com/openshift/library-go/pkg/operator/events"
certificates "k8s.io/api/certificates/v1beta1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
)
const TestDir = "/tmp/registration-integration-test"
var (
CertDir = path.Join(TestDir, "server-certs")
CAFile = path.Join(CertDir, "ca.crt")
CAKeyFile = path.Join(CertDir, "ca.key")
ServerCertFile = path.Join(CertDir, "apiserver.crt")
ServerKeyFile = path.Join(CertDir, "apiserver.key")
)
// writeBootstrapKubeConfig builds a kubeconfig pointing at the local test hub
// API server (https://127.0.0.1:<securePort>), trusting the test CA and
// authenticating with the given client certificate/key files, and writes it
// to configFileName.
func writeBootstrapKubeConfig(configFileName string, securePort int, clientCertFile, clientKeyFile string) error {
	caData, err := ioutil.ReadFile(CAFile)
	if err != nil {
		return err
	}
	config := clientcmdapi.NewConfig()
	config.Clusters["hub"] = &clientcmdapi.Cluster{
		Server:                   fmt.Sprintf("https://127.0.0.1:%d", securePort),
		CertificateAuthorityData: caData,
	}
	config.AuthInfos["bootstrap"] = &clientcmdapi.AuthInfo{
		ClientCertificate: clientCertFile,
		ClientKey:         clientKeyFile,
	}
	config.Contexts["bootstrap"] = &clientcmdapi.Context{
		Cluster:  "hub",
		AuthInfo: "bootstrap",
	}
	config.CurrentContext = "bootstrap"
	return clientcmd.WriteToFile(*config, configFileName)
}

// CreateBootstrapKubeConfig writes a bootstrap kubeconfig to configFileName
// that reuses the test API server's own certificate/key pair as the client
// credential.
func CreateBootstrapKubeConfig(configFileName string, securePort int) error {
	return writeBootstrapKubeConfig(configFileName, securePort, ServerCertFile, ServerKeyFile)
}

// CreateBootstrapKubeConfigWithCertAge signs a fresh client certificate/key
// pair valid for certAge with the test CA, stores the pair next to
// configFileName (bootstrap.crt / bootstrap.key), and writes a bootstrap
// kubeconfig referencing them. Passing a short or negative certAge yields an
// expired credential, which the recovery tests rely on.
func CreateBootstrapKubeConfigWithCertAge(configFileName string, securePort int, certAge time.Duration) error {
	certData, keyData, err := SignAPIServerCertKeyWithCA(certAge)
	if err != nil {
		return err
	}
	// make sure the target directory exists before writing the credentials
	configDir := path.Dir(configFileName)
	if _, err := os.Stat(configDir); os.IsNotExist(err) {
		if err = os.MkdirAll(configDir, 0755); err != nil {
			return err
		}
	}
	certFile := path.Join(configDir, "bootstrap.crt")
	keyFile := path.Join(configDir, "bootstrap.key")
	if err := ioutil.WriteFile(certFile, certData, 0644); err != nil {
		return err
	}
	if err := ioutil.WriteFile(keyFile, keyData, 0644); err != nil {
		return err
	}
	return writeBootstrapKubeConfig(configFileName, securePort, certFile, keyFile)
}
// CreateSpokeKubeConfig returns a copy of restConfig rewired to talk to the
// test hub API server at 127.0.0.1:<securePort>, trusting the test CA and
// presenting the server's certificate/key pair as client credentials.
func CreateSpokeKubeConfig(restConfig *rest.Config, securePort int) (*rest.Config, error) {
	cfg := rest.CopyConfig(restConfig)
	cfg.Host = fmt.Sprintf("127.0.0.1:%d", securePort)

	// load the CA, client cert, and client key from the shared cert dir
	sources := []struct {
		fileName string
		dest     *[]byte
	}{
		{CAFile, &cfg.CAData},
		{ServerCertFile, &cfg.CertData},
		{ServerKeyFile, &cfg.KeyData},
	}
	for _, src := range sources {
		data, err := ioutil.ReadFile(src.fileName)
		if err != nil {
			return nil, err
		}
		*src.dest = data
	}
	return cfg, nil
}
// GenerateSelfSignedCertKey creates a self-signed CA (valid for 24 hours) and
// an API-server certificate/key pair signed by that CA, writing all four PEM
// files under CertDir.
func GenerateSelfSignedCertKey() error {
	if _, err := os.Stat(CertDir); os.IsNotExist(err) {
		if err = os.MkdirAll(CertDir, 0755); err != nil {
			return err
		}
	}

	// encode one PEM block and write it to disk
	writePEM := func(fileName, blockType string, derBytes []byte) error {
		var buf bytes.Buffer
		if err := pem.Encode(&buf, &pem.Block{Type: blockType, Bytes: derBytes}); err != nil {
			return err
		}
		return ioutil.WriteFile(fileName, buf.Bytes(), 0644)
	}

	startTime := time.Now()
	validity := time.Hour * 24

	// create the CA key pair and a self-signed CA certificate
	caKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return err
	}
	caTemplate := x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "127.0.0.1"},
		NotBefore:             startTime.UTC(),
		NotAfter:              startTime.Add(validity).UTC(),
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
		IsCA:                  true,
	}
	caDER, err := x509.CreateCertificate(rand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey)
	if err != nil {
		return err
	}
	if err := writePEM(CAFile, certutil.CertificateBlockType, caDER); err != nil {
		return err
	}
	if err := writePEM(CAKeyFile, keyutil.RSAPrivateKeyBlockType, x509.MarshalPKCS1PrivateKey(caKey)); err != nil {
		return err
	}

	// sign the API server certificate/key with the CA written above
	serverCert, serverKey, err := SignAPIServerCertKeyWithCA(validity)
	if err != nil {
		return err
	}
	if err := ioutil.WriteFile(ServerCertFile, serverCert, 0644); err != nil {
		return err
	}
	return ioutil.WriteFile(ServerKeyFile, serverKey, 0644)
}
// SignAPIServerCertKeyWithCA generates a new RSA key and a certificate signed
// by the test CA (loaded from CAFile/CAKeyFile) valid for maxAge from now.
// The certificate carries both client- and server-auth usages and the
// loopback/Kubernetes SANs so it can serve as the test API server identity or
// as a client credential. Returns PEM-encoded cert and key.
func SignAPIServerCertKeyWithCA(maxAge time.Duration) ([]byte, []byte, error) {
	now := time.Now()
	caData, err := ioutil.ReadFile(CAFile)
	if err != nil {
		return nil, nil, err
	}
	caBlock, _ := pem.Decode(caData)
	// pem.Decode returns nil when no PEM block is present; dereferencing it
	// unchecked would panic on a corrupt CA file
	if caBlock == nil {
		return nil, nil, fmt.Errorf("no PEM block found in %q", CAFile)
	}
	caCert, err := x509.ParseCertificate(caBlock.Bytes)
	if err != nil {
		return nil, nil, err
	}
	caKeyData, err := ioutil.ReadFile(CAKeyFile)
	if err != nil {
		return nil, nil, err
	}
	keyBlock, _ := pem.Decode(caKeyData)
	if keyBlock == nil {
		return nil, nil, fmt.Errorf("no PEM block found in %q", CAKeyFile)
	}
	caKey, err := x509.ParsePKCS1PrivateKey(keyBlock.Bytes)
	if err != nil {
		return nil, nil, err
	}
	serverKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, nil, err
	}
	serverDERBytes, err := x509.CreateCertificate(
		rand.Reader,
		&x509.Certificate{
			SerialNumber:          big.NewInt(1),
			Subject:               pkix.Name{Organization: []string{"registration.integration.test"}, CommonName: "127.0.0.1"},
			NotBefore:             now.UTC(),
			NotAfter:              now.Add(maxAge).UTC(),
			BasicConstraintsValid: false,
			IsCA:                  false,
			KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDataEncipherment,
			ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
			IPAddresses:           []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("10.0.0.0")},
			DNSNames: []string{
				"kubernetes",
				"kubernetes.default",
				"kubernetes.default.svc",
				"kubernetes.default.svc.cluster",
				"kubernetes.default.svc.cluster.local",
			},
		},
		caCert,
		&serverKey.PublicKey,
		caKey,
	)
	if err != nil {
		return nil, nil, err
	}
	certBuffer := bytes.Buffer{}
	if err := pem.Encode(&certBuffer, &pem.Block{Type: certutil.CertificateBlockType, Bytes: serverDERBytes}); err != nil {
		return nil, nil, err
	}
	keyBuffer := bytes.Buffer{}
	if err := pem.Encode(
		&keyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(serverKey)}); err != nil {
		return nil, nil, err
	}
	return certBuffer.Bytes(), keyBuffer.Bytes(), nil
}
// PrepareSpokeAgentNamespace ensures the given namespace exists, creating it
// when absent; a namespace that already exists is not an error.
func PrepareSpokeAgentNamespace(kubeClient kubernetes.Interface, namespace string) error {
	switch _, err := kubeClient.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); {
	case err == nil:
		// namespace already present, nothing to do
		return nil
	case !errors.IsNotFound(err):
		// any failure other than "not found" is fatal
		return err
	}
	_, createErr := kubeClient.CoreV1().Namespaces().Create(
		context.TODO(),
		&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}},
		metav1.CreateOptions{},
	)
	return createErr
}
// GetFilledHubKubeConfigSecret fetches the hub kubeconfig secret and verifies
// that the registration agent has populated every key it writes once its CSR
// is approved. Returns the secret, or an error when it is missing or any
// required key is absent.
func GetFilledHubKubeConfigSecret(kubeClient kubernetes.Interface, secretNamespace, secretName string) (*corev1.Secret, error) {
	secret, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(context.TODO(), secretName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	// the agent fills these keys after its client certificate is issued;
	// error strings no longer carry a trailing newline (Go convention)
	for _, key := range []string{"cluster-name", "agent-name", "kubeconfig", "tls.crt", "tls.key"} {
		if _, existed := secret.Data[key]; !existed {
			return nil, fmt.Errorf("%s is not found", key)
		}
	}
	return secret, nil
}
// MountHubKubeConfigs emulates the kubelet mounting the hub kubeconfig secret
// as a volume: each key of the secret is written as a file with the same name
// under hubKubeConfigDir, which is created if necessary.
func MountHubKubeConfigs(kubeClient kubernetes.Interface, hubKubeConfigDir, secretNamespace, secretName string) error {
	if _, err := os.Stat(hubKubeConfigDir); os.IsNotExist(err) {
		if err = os.MkdirAll(hubKubeConfigDir, 0755); err != nil {
			return err
		}
	}
	secret, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(context.TODO(), secretName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// write each secret key as a file, just as a mounted secret volume would
	for _, key := range []string{"cluster-name", "agent-name", "tls.crt", "tls.key", "kubeconfig"} {
		if err := ioutil.WriteFile(path.Join(hubKubeConfigDir, key), secret.Data[key], 0644); err != nil {
			return err
		}
	}
	return nil
}
// FindUnapprovedSpokeCSR returns (a deep copy of) the first CSR labeled for
// the given spoke cluster that has no status conditions yet, i.e. has been
// neither approved nor denied. An error is returned when no such CSR exists.
func FindUnapprovedSpokeCSR(kubeClient kubernetes.Interface, spokeClusterName string) (*certificates.CertificateSigningRequest, error) {
	csrList, err := kubeClient.CertificatesV1beta1().CertificateSigningRequests().List(context.TODO(), metav1.ListOptions{
		LabelSelector: fmt.Sprintf("open-cluster-management.io/cluster-name=%s", spokeClusterName),
	})
	if err != nil {
		return nil, err
	}
	// a CSR without conditions has not been acted on by any approver yet
	for _, csr := range csrList.Items {
		if len(csr.Status.Conditions) == 0 {
			return csr.DeepCopy(), nil
		}
	}
	// error string no longer carries a trailing newline (Go convention)
	return nil, fmt.Errorf("failed to find unapproved csr for spoke cluster %q", spokeClusterName)
}
// FindAutoApprovedSpokeCSR returns (a deep copy of) the first CSR labeled for
// the given spoke cluster whose leading condition shows it was approved by
// the hub's auto-approving controller. An error is returned when no such CSR
// exists.
func FindAutoApprovedSpokeCSR(kubeClient kubernetes.Interface, spokeClusterName string) (*certificates.CertificateSigningRequest, error) {
	csrList, err := kubeClient.CertificatesV1beta1().CertificateSigningRequests().List(context.TODO(), metav1.ListOptions{
		LabelSelector: fmt.Sprintf("open-cluster-management.io/cluster-name=%s", spokeClusterName),
	})
	if err != nil {
		return nil, err
	}
	for _, csr := range csrList.Items {
		// skip CSRs that have not been acted on at all
		if len(csr.Status.Conditions) == 0 {
			continue
		}
		cond := csr.Status.Conditions[0]
		if cond.Type == certificates.CertificateApproved &&
			cond.Reason == "AutoApprovedByHubCSRApprovingController" {
			return csr.DeepCopy(), nil
		}
	}
	// error string no longer carries a trailing newline (Go convention)
	return nil, fmt.Errorf("failed to find autoapproved csr for spoke cluster %q", spokeClusterName)
}
// ApproveSpokeClusterCSRWithExpiredCert approves the pending CSR of the given
// spoke cluster but issues a certificate that expired one hour ago, so the
// resulting hub kubeconfig is unusable by the agent.
func ApproveSpokeClusterCSRWithExpiredCert(kubeClient kubernetes.Interface, spokeClusterName string) error {
	csr, err := FindUnapprovedSpokeCSR(kubeClient, spokeClusterName)
	if err != nil {
		return err
	}
	issuedAt := time.Now().UTC()
	return ApproveCSR(kubeClient, csr, issuedAt, issuedAt.Add(-time.Hour))
}
// ApproveSpokeClusterCSR approves the pending CSR of the given spoke cluster,
// issuing a certificate that is valid from now for certAge.
func ApproveSpokeClusterCSR(kubeClient kubernetes.Interface, spokeClusterName string, certAge time.Duration) error {
	csr, err := FindUnapprovedSpokeCSR(kubeClient, spokeClusterName)
	if err != nil {
		return err
	}
	issuedAt := time.Now().UTC()
	return ApproveCSR(kubeClient, csr, issuedAt, issuedAt.Add(certAge))
}
// ApproveCSR signs the client certificate requested in the given CSR with the
// test CA, valid for [notBefore, notAfter], stores it in the CSR status, and
// then marks the CSR approved — emulating both the hub administrator and the
// kube-controller-manager signer.
func ApproveCSR(kubeClient kubernetes.Interface, csr *certificates.CertificateSigningRequest, notBefore, notAfter time.Time) error {
	block, _ := pem.Decode(csr.Spec.Request)
	// pem.Decode returns nil when no PEM block is present; dereferencing it
	// unchecked would panic on a malformed request
	if block == nil {
		return fmt.Errorf("no PEM block found in csr %q", csr.Name)
	}
	cr, err := x509.ParseCertificateRequest(block.Bytes)
	if err != nil {
		return err
	}
	caData, err := ioutil.ReadFile(CAFile)
	if err != nil {
		return err
	}
	caBlock, _ := pem.Decode(caData)
	if caBlock == nil {
		return fmt.Errorf("no PEM block found in %q", CAFile)
	}
	caCert, err := x509.ParseCertificate(caBlock.Bytes)
	if err != nil {
		return err
	}
	caKeyData, err := ioutil.ReadFile(CAKeyFile)
	if err != nil {
		return err
	}
	keyBlock, _ := pem.Decode(caKeyData)
	if keyBlock == nil {
		return fmt.Errorf("no PEM block found in %q", CAKeyFile)
	}
	caKey, err := x509.ParsePKCS1PrivateKey(keyBlock.Bytes)
	if err != nil {
		return err
	}
	// sign a client certificate for the subject requested in the CSR
	certDERBytes, err := x509.CreateCertificate(
		rand.Reader,
		&x509.Certificate{
			SerialNumber:          big.NewInt(1),
			Subject:               pkix.Name{Organization: cr.Subject.Organization, CommonName: cr.Subject.CommonName},
			NotBefore:             notBefore,
			NotAfter:              notAfter,
			BasicConstraintsValid: false,
			IsCA:                  false,
			KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
			ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		},
		caCert,
		cr.PublicKey,
		caKey,
	)
	if err != nil {
		return err
	}
	certBuffer := bytes.Buffer{}
	if err := pem.Encode(&certBuffer, &pem.Block{Type: certutil.CertificateBlockType, Bytes: certDERBytes}); err != nil {
		return err
	}
	// store the issued certificate in the CSR status
	csr.Status = certificates.CertificateSigningRequestStatus{
		Certificate: certBuffer.Bytes(),
		Conditions:  []certificates.CertificateSigningRequestCondition{},
	}
	if _, err := kubeClient.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(context.TODO(), csr, metav1.UpdateOptions{}); err != nil {
		return err
	}
	// re-fetch to pick up the updated resource version, then approve the csr
	approved, err := kubeClient.CertificatesV1beta1().CertificateSigningRequests().Get(context.TODO(), csr.Name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	approved.Status.Conditions = append(approved.Status.Conditions, certificates.CertificateSigningRequestCondition{
		Type:           certificates.CertificateApproved,
		Reason:         "Approved",
		Message:        "CSR Approved.",
		LastUpdateTime: metav1.Now(),
	})
	_, err = kubeClient.CertificatesV1beta1().CertificateSigningRequests().UpdateApproval(context.TODO(), approved, metav1.UpdateOptions{})
	return err
}
// GetSpokeCluster returns the SpokeCluster resource with the given name from
// the hub.
func GetSpokeCluster(clusterClient clusterclientset.Interface, spokeClusterName string) (*clusterv1.SpokeCluster, error) {
	return clusterClient.ClusterV1().SpokeClusters().Get(context.TODO(), spokeClusterName, metav1.GetOptions{})
}
// AcceptSpokeCluster sets HubAcceptsClient on the named spoke cluster,
// emulating a hub administrator accepting the cluster's join request.
func AcceptSpokeCluster(clusterClient clusterclientset.Interface, spokeClusterName string) error {
	cluster, err := GetSpokeCluster(clusterClient, spokeClusterName)
	if err != nil {
		return err
	}
	cluster.Spec.HubAcceptsClient = true
	if _, err = clusterClient.ClusterV1().SpokeClusters().Update(context.TODO(), cluster, metav1.UpdateOptions{}); err != nil {
		return err
	}
	return nil
}
// NewIntegrationTestEventRecorder returns an events.Recorder that writes all
// events to the ginkgo test output instead of the kubernetes API.
// (Fixes the misspelled parameter name "componet".)
func NewIntegrationTestEventRecorder(component string) events.Recorder {
	return &IntegrationTestEventRecorder{component: component}
}

// IntegrationTestEventRecorder implements events.Recorder by printing events
// and warnings to ginkgo.GinkgoWriter so they appear in the test logs.
type IntegrationTestEventRecorder struct {
	component string
}

// ComponentName returns the component this recorder reports events for.
func (r *IntegrationTestEventRecorder) ComponentName() string {
	return r.component
}

// ForComponent returns a new recorder scoped to component c.
func (r *IntegrationTestEventRecorder) ForComponent(c string) events.Recorder {
	return &IntegrationTestEventRecorder{component: c}
}

// WithComponentSuffix returns a recorder whose component name is the current
// name with "-<suffix>" appended.
func (r *IntegrationTestEventRecorder) WithComponentSuffix(suffix string) events.Recorder {
	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
}

// Event writes a normal event to the ginkgo writer.
func (r *IntegrationTestEventRecorder) Event(reason, message string) {
	fmt.Fprintf(ginkgo.GinkgoWriter, "Event: [%s] %v: %v \n", r.component, reason, message)
}

// Eventf writes a normal event with a formatted message.
func (r *IntegrationTestEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
	r.Event(reason, fmt.Sprintf(messageFmt, args...))
}

// Warning writes a warning event to the ginkgo writer.
func (r *IntegrationTestEventRecorder) Warning(reason, message string) {
	fmt.Fprintf(ginkgo.GinkgoWriter, "Warning: [%s] %v: %v \n", r.component, reason, message)
}

// Warningf writes a warning event with a formatted message.
func (r *IntegrationTestEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
	r.Warning(reason, fmt.Sprintf(messageFmt, args...))
}